langchain-core 0.3.75__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (116)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +19 -6
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/manager.py +81 -69
  9. langchain_core/callbacks/usage.py +4 -2
  10. langchain_core/chat_history.py +4 -6
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +3 -0
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +56 -44
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +94 -25
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +49 -17
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +36 -19
  33. langchain_core/messages/base.py +13 -6
  34. langchain_core/messages/content_blocks.py +23 -2
  35. langchain_core/messages/human.py +2 -6
  36. langchain_core/messages/system.py +2 -6
  37. langchain_core/messages/tool.py +33 -13
  38. langchain_core/messages/utils.py +182 -72
  39. langchain_core/output_parsers/base.py +5 -2
  40. langchain_core/output_parsers/json.py +4 -4
  41. langchain_core/output_parsers/list.py +7 -22
  42. langchain_core/output_parsers/openai_functions.py +3 -0
  43. langchain_core/output_parsers/openai_tools.py +6 -1
  44. langchain_core/output_parsers/pydantic.py +4 -0
  45. langchain_core/output_parsers/string.py +5 -1
  46. langchain_core/output_parsers/xml.py +19 -19
  47. langchain_core/outputs/chat_generation.py +18 -7
  48. langchain_core/outputs/generation.py +14 -3
  49. langchain_core/outputs/llm_result.py +8 -1
  50. langchain_core/prompt_values.py +10 -4
  51. langchain_core/prompts/base.py +4 -9
  52. langchain_core/prompts/chat.py +87 -58
  53. langchain_core/prompts/dict.py +16 -8
  54. langchain_core/prompts/few_shot.py +9 -11
  55. langchain_core/prompts/few_shot_with_templates.py +5 -1
  56. langchain_core/prompts/image.py +12 -5
  57. langchain_core/prompts/message.py +5 -6
  58. langchain_core/prompts/pipeline.py +13 -8
  59. langchain_core/prompts/prompt.py +22 -8
  60. langchain_core/prompts/string.py +18 -10
  61. langchain_core/prompts/structured.py +7 -2
  62. langchain_core/rate_limiters.py +2 -2
  63. langchain_core/retrievers.py +7 -6
  64. langchain_core/runnables/base.py +402 -183
  65. langchain_core/runnables/branch.py +14 -19
  66. langchain_core/runnables/config.py +9 -15
  67. langchain_core/runnables/configurable.py +34 -19
  68. langchain_core/runnables/fallbacks.py +20 -13
  69. langchain_core/runnables/graph.py +44 -37
  70. langchain_core/runnables/graph_ascii.py +40 -17
  71. langchain_core/runnables/graph_mermaid.py +27 -15
  72. langchain_core/runnables/graph_png.py +27 -31
  73. langchain_core/runnables/history.py +55 -58
  74. langchain_core/runnables/passthrough.py +44 -21
  75. langchain_core/runnables/retry.py +9 -5
  76. langchain_core/runnables/router.py +9 -8
  77. langchain_core/runnables/schema.py +2 -0
  78. langchain_core/runnables/utils.py +51 -89
  79. langchain_core/stores.py +13 -25
  80. langchain_core/sys_info.py +9 -8
  81. langchain_core/tools/base.py +30 -23
  82. langchain_core/tools/convert.py +24 -13
  83. langchain_core/tools/simple.py +35 -3
  84. langchain_core/tools/structured.py +25 -2
  85. langchain_core/tracers/base.py +2 -2
  86. langchain_core/tracers/context.py +5 -1
  87. langchain_core/tracers/core.py +109 -39
  88. langchain_core/tracers/evaluation.py +22 -26
  89. langchain_core/tracers/event_stream.py +40 -27
  90. langchain_core/tracers/langchain.py +12 -3
  91. langchain_core/tracers/langchain_v1.py +10 -2
  92. langchain_core/tracers/log_stream.py +56 -17
  93. langchain_core/tracers/root_listeners.py +4 -20
  94. langchain_core/tracers/run_collector.py +6 -16
  95. langchain_core/tracers/schemas.py +5 -1
  96. langchain_core/utils/aiter.py +14 -6
  97. langchain_core/utils/env.py +3 -0
  98. langchain_core/utils/function_calling.py +37 -20
  99. langchain_core/utils/interactive_env.py +6 -2
  100. langchain_core/utils/iter.py +11 -3
  101. langchain_core/utils/json.py +5 -2
  102. langchain_core/utils/json_schema.py +15 -5
  103. langchain_core/utils/loading.py +5 -1
  104. langchain_core/utils/mustache.py +24 -15
  105. langchain_core/utils/pydantic.py +32 -4
  106. langchain_core/utils/utils.py +24 -8
  107. langchain_core/vectorstores/base.py +7 -20
  108. langchain_core/vectorstores/in_memory.py +18 -12
  109. langchain_core/vectorstores/utils.py +18 -12
  110. langchain_core/version.py +1 -1
  111. langchain_core-0.3.76.dist-info/METADATA +77 -0
  112. langchain_core-0.3.76.dist-info/RECORD +174 -0
  113. langchain_core-0.3.75.dist-info/METADATA +0 -106
  114. langchain_core-0.3.75.dist-info/RECORD +0 -174
  115. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  116. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
@@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
67
67
  from langchain_core.prompts import MessagesPlaceholder
68
68
 
69
69
  prompt = MessagesPlaceholder("history")
70
- prompt.format_messages() # raises KeyError
70
+ prompt.format_messages() # raises KeyError
71
71
 
72
72
  prompt = MessagesPlaceholder("history", optional=True)
73
- prompt.format_messages() # returns empty list []
73
+ prompt.format_messages() # returns empty list []
74
74
 
75
75
  prompt.format_messages(
76
76
  history=[
@@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
93
93
  [
94
94
  ("system", "You are a helpful assistant."),
95
95
  MessagesPlaceholder("history"),
96
- ("human", "{question}")
96
+ ("human", "{question}"),
97
97
  ]
98
98
  )
99
99
  prompt.invoke(
100
- {
101
- "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
- "question": "now multiply that by 4"
103
- }
100
+ {
101
+ "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
+ "question": "now multiply that by 4",
103
+ }
104
104
  )
105
105
  # -> ChatPromptValue(messages=[
106
106
  # SystemMessage(content="You are a helpful assistant."),
@@ -740,10 +740,18 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
740
740
 
741
741
  @abstractmethod
742
742
  def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
743
- """Format kwargs into a list of messages."""
743
+ """Format kwargs into a list of messages.
744
+
745
+ Returns:
746
+ List of messages.
747
+ """
744
748
 
745
749
  async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
746
- """Async format kwargs into a list of messages."""
750
+ """Async format kwargs into a list of messages.
751
+
752
+ Returns:
753
+ List of messages.
754
+ """
747
755
  return self.format_messages(**kwargs)
748
756
 
749
757
  def pretty_repr(
@@ -795,18 +803,17 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
795
803
 
796
804
  from langchain_core.prompts import ChatPromptTemplate
797
805
 
798
- template = ChatPromptTemplate([
799
- ("system", "You are a helpful AI bot. Your name is {name}."),
800
- ("human", "Hello, how are you doing?"),
801
- ("ai", "I'm doing well, thanks!"),
802
- ("human", "{user_input}"),
803
- ])
806
+ template = ChatPromptTemplate(
807
+ [
808
+ ("system", "You are a helpful AI bot. Your name is {name}."),
809
+ ("human", "Hello, how are you doing?"),
810
+ ("ai", "I'm doing well, thanks!"),
811
+ ("human", "{user_input}"),
812
+ ]
813
+ )
804
814
 
805
815
  prompt_value = template.invoke(
806
- {
807
- "name": "Bob",
808
- "user_input": "What is your name?"
809
- }
816
+ {"name": "Bob", "user_input": "What is your name?"}
810
817
  )
811
818
  # Output:
812
819
  # ChatPromptValue(
@@ -816,7 +823,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
816
823
  # AIMessage(content="I'm doing well, thanks!"),
817
824
  # HumanMessage(content='What is your name?')
818
825
  # ]
819
- #)
826
+ # )
820
827
 
821
828
  Messages Placeholder:
822
829
 
@@ -826,14 +833,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
826
833
  # you can initialize the template with a MessagesPlaceholder
827
834
  # either using the class directly or with the shorthand tuple syntax:
828
835
 
829
- template = ChatPromptTemplate([
830
- ("system", "You are a helpful AI bot."),
831
- # Means the template will receive an optional list of messages under
832
- # the "conversation" key
833
- ("placeholder", "{conversation}")
834
- # Equivalently:
835
- # MessagesPlaceholder(variable_name="conversation", optional=True)
836
- ])
836
+ template = ChatPromptTemplate(
837
+ [
838
+ ("system", "You are a helpful AI bot."),
839
+ # Means the template will receive an optional list of messages under
840
+ # the "conversation" key
841
+ ("placeholder", "{conversation}"),
842
+ # Equivalently:
843
+ # MessagesPlaceholder(variable_name="conversation", optional=True)
844
+ ]
845
+ )
837
846
 
838
847
  prompt_value = template.invoke(
839
848
  {
@@ -841,7 +850,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
841
850
  ("human", "Hi!"),
842
851
  ("ai", "How can I assist you today?"),
843
852
  ("human", "Can you make me an ice cream sundae?"),
844
- ("ai", "No.")
853
+ ("ai", "No."),
845
854
  ]
846
855
  }
847
856
  )
@@ -855,7 +864,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
855
864
  # HumanMessage(content='Can you make me an ice cream sundae?'),
856
865
  # AIMessage(content='No.'),
857
866
  # ]
858
- #)
867
+ # )
859
868
 
860
869
  Single-variable template:
861
870
 
@@ -868,10 +877,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
868
877
 
869
878
  from langchain_core.prompts import ChatPromptTemplate
870
879
 
871
- template = ChatPromptTemplate([
872
- ("system", "You are a helpful AI bot. Your name is Carl."),
873
- ("human", "{user_input}"),
874
- ])
880
+ template = ChatPromptTemplate(
881
+ [
882
+ ("system", "You are a helpful AI bot. Your name is Carl."),
883
+ ("human", "{user_input}"),
884
+ ]
885
+ )
875
886
 
876
887
  prompt_value = template.invoke("Hello, there!")
877
888
  # Equivalent to
@@ -922,28 +933,29 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
922
933
  input_types: A dictionary of the types of the variables the prompt template
923
934
  expects. If not provided, all variables are assumed to be strings.
924
935
 
925
- Returns:
926
- A chat prompt template.
927
-
928
936
  Examples:
929
937
  Instantiation from a list of message templates:
930
938
 
931
939
  .. code-block:: python
932
940
 
933
- template = ChatPromptTemplate([
934
- ("human", "Hello, how are you?"),
935
- ("ai", "I'm doing well, thanks!"),
936
- ("human", "That's good to hear."),
937
- ])
941
+ template = ChatPromptTemplate(
942
+ [
943
+ ("human", "Hello, how are you?"),
944
+ ("ai", "I'm doing well, thanks!"),
945
+ ("human", "That's good to hear."),
946
+ ]
947
+ )
938
948
 
939
949
  Instantiation from mixed message formats:
940
950
 
941
951
  .. code-block:: python
942
952
 
943
- template = ChatPromptTemplate([
944
- SystemMessage(content="hello"),
945
- ("human", "Hello, how are you?"),
946
- ])
953
+ template = ChatPromptTemplate(
954
+ [
955
+ SystemMessage(content="hello"),
956
+ ("human", "Hello, how are you?"),
957
+ ]
958
+ )
947
959
 
948
960
  """
949
961
  messages_ = [
@@ -974,7 +986,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
974
986
 
975
987
  @classmethod
976
988
  def get_lc_namespace(cls) -> list[str]:
977
- """Get the namespace of the langchain object."""
989
+ """Get the namespace of the langchain object.
990
+
991
+ Returns:
992
+ ``["langchain", "prompts", "chat"]``
993
+ """
978
994
  return ["langchain", "prompts", "chat"]
979
995
 
980
996
  def __add__(self, other: Any) -> ChatPromptTemplate:
@@ -1137,20 +1153,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1137
1153
 
1138
1154
  .. code-block:: python
1139
1155
 
1140
- template = ChatPromptTemplate.from_messages([
1141
- ("human", "Hello, how are you?"),
1142
- ("ai", "I'm doing well, thanks!"),
1143
- ("human", "That's good to hear."),
1144
- ])
1156
+ template = ChatPromptTemplate.from_messages(
1157
+ [
1158
+ ("human", "Hello, how are you?"),
1159
+ ("ai", "I'm doing well, thanks!"),
1160
+ ("human", "That's good to hear."),
1161
+ ]
1162
+ )
1145
1163
 
1146
1164
  Instantiation from mixed message formats:
1147
1165
 
1148
1166
  .. code-block:: python
1149
1167
 
1150
- template = ChatPromptTemplate.from_messages([
1151
- SystemMessage(content="hello"),
1152
- ("human", "Hello, how are you?"),
1153
- ])
1168
+ template = ChatPromptTemplate.from_messages(
1169
+ [
1170
+ SystemMessage(content="hello"),
1171
+ ("human", "Hello, how are you?"),
1172
+ ]
1173
+ )
1154
1174
 
1155
1175
  Args:
1156
1176
  messages: sequence of message representations.
@@ -1174,6 +1194,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1174
1194
  **kwargs: keyword arguments to use for filling in template variables
1175
1195
  in all the template messages in this chat template.
1176
1196
 
1197
+ Raises:
1198
+ ValueError: if messages are of unexpected types.
1199
+
1177
1200
  Returns:
1178
1201
  list of formatted messages.
1179
1202
  """
@@ -1284,7 +1307,13 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1284
1307
  def __getitem__(
1285
1308
  self, index: Union[int, slice]
1286
1309
  ) -> Union[MessageLike, ChatPromptTemplate]:
1287
- """Use to index into the chat template."""
1310
+ """Use to index into the chat template.
1311
+
1312
+ Returns:
1313
+ If index is an int, returns the message at that index.
1314
+ If index is a slice, returns a new ``ChatPromptTemplate``
1315
+ containing the messages in that slice.
1316
+ """
1288
1317
  if isinstance(index, slice):
1289
1318
  start, stop, step = index.indices(len(self.messages))
1290
1319
  messages = self.messages[start:stop:step]
@@ -1292,7 +1321,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1292
1321
  return self.messages[index]
1293
1322
 
1294
1323
  def __len__(self) -> int:
1295
- """Get the length of the chat template."""
1324
+ """Return the length of the chat template."""
1296
1325
  return len(self.messages)
1297
1326
 
1298
1327
  @property
@@ -31,18 +31,25 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
31
31
  return _get_input_variables(self.template, self.template_format)
32
32
 
33
33
  def format(self, **kwargs: Any) -> dict[str, Any]:
34
- """Format the prompt with the inputs."""
34
+ """Format the prompt with the inputs.
35
+
36
+ Returns:
37
+ A formatted dict.
38
+ """
35
39
  return _insert_input_variables(self.template, kwargs, self.template_format)
36
40
 
37
41
  async def aformat(self, **kwargs: Any) -> dict[str, Any]:
38
- """Format the prompt with the inputs."""
42
+ """Format the prompt with the inputs.
43
+
44
+ Returns:
45
+ A formatted dict.
46
+ """
39
47
  return self.format(**kwargs)
40
48
 
41
49
  @override
42
50
  def invoke(
43
51
  self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
44
52
  ) -> dict:
45
- """Invoke the prompt."""
46
53
  return self._call_with_config(
47
54
  lambda x: self.format(**x),
48
55
  input,
@@ -62,15 +69,16 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
62
69
 
63
70
  @classmethod
64
71
  def is_lc_serializable(cls) -> bool:
65
- """Return whether or not the class is serializable.
66
-
67
- Returns: True.
68
- """
72
+ """Return True as this class is serializable."""
69
73
  return True
70
74
 
71
75
  @classmethod
72
76
  def get_lc_namespace(cls) -> list[str]:
73
- """Serialization namespace."""
77
+ """Get the namespace of the langchain object.
78
+
79
+ Returns:
80
+ ``["langchain_core", "prompts", "dict"]``
81
+ """
74
82
  return ["langchain_core", "prompts", "dict"]
75
83
 
76
84
  def pretty_repr(self, *, html: bool = False) -> str:
@@ -117,7 +117,7 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
117
117
 
118
118
  @classmethod
119
119
  def is_lc_serializable(cls) -> bool:
120
- """Return whether or not the class is serializable."""
120
+ """Return False as this class is not serializable."""
121
121
  return False
122
122
 
123
123
  validate_template: bool = False
@@ -153,7 +153,7 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
153
153
  self.template_format,
154
154
  self.input_variables + list(self.partial_variables),
155
155
  )
156
- elif self.template_format or None:
156
+ elif self.template_format:
157
157
  self.input_variables = [
158
158
  var
159
159
  for var in get_template_variables(
@@ -272,7 +272,7 @@ class FewShotChatMessagePromptTemplate(
272
272
 
273
273
  from langchain_core.prompts import (
274
274
  FewShotChatMessagePromptTemplate,
275
- ChatPromptTemplate
275
+ ChatPromptTemplate,
276
276
  )
277
277
 
278
278
  examples = [
@@ -281,7 +281,7 @@ class FewShotChatMessagePromptTemplate(
281
281
  ]
282
282
 
283
283
  example_prompt = ChatPromptTemplate.from_messages(
284
- [('human', 'What is {input}?'), ('ai', '{output}')]
284
+ [("human", "What is {input}?"), ("ai", "{output}")]
285
285
  )
286
286
 
287
287
  few_shot_prompt = FewShotChatMessagePromptTemplate(
@@ -292,9 +292,9 @@ class FewShotChatMessagePromptTemplate(
292
292
 
293
293
  final_prompt = ChatPromptTemplate.from_messages(
294
294
  [
295
- ('system', 'You are a helpful AI Assistant'),
295
+ ("system", "You are a helpful AI Assistant"),
296
296
  few_shot_prompt,
297
- ('human', '{input}'),
297
+ ("human", "{input}"),
298
298
  ]
299
299
  )
300
300
  final_prompt.format(input="What is 4+4?")
@@ -314,10 +314,7 @@ class FewShotChatMessagePromptTemplate(
314
314
  # ...
315
315
  ]
316
316
 
317
- to_vectorize = [
318
- " ".join(example.values())
319
- for example in examples
320
- ]
317
+ to_vectorize = [" ".join(example.values()) for example in examples]
321
318
  embeddings = OpenAIEmbeddings()
322
319
  vectorstore = Chroma.from_texts(
323
320
  to_vectorize, embeddings, metadatas=examples
@@ -355,6 +352,7 @@ class FewShotChatMessagePromptTemplate(
355
352
 
356
353
  # Use within an LLM
357
354
  from langchain_core.chat_models import ChatAnthropic
355
+
358
356
  chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
359
357
  chain.invoke({"input": "What's 3+3?"})
360
358
 
@@ -369,7 +367,7 @@ class FewShotChatMessagePromptTemplate(
369
367
 
370
368
  @classmethod
371
369
  def is_lc_serializable(cls) -> bool:
372
- """Return whether or not the class is serializable."""
370
+ """Return False as this class is not serializable."""
373
371
  return False
374
372
 
375
373
  model_config = ConfigDict(
@@ -46,7 +46,11 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
46
46
 
47
47
  @classmethod
48
48
  def get_lc_namespace(cls) -> list[str]:
49
- """Get the namespace of the langchain object."""
49
+ """Get the namespace of the langchain object.
50
+
51
+ Returns:
52
+ ``["langchain", "prompts", "few_shot_with_templates"]``
53
+ """
50
54
  return ["langchain", "prompts", "few_shot_with_templates"]
51
55
 
52
56
  @model_validator(mode="before")
@@ -23,7 +23,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
23
23
  Options are: 'f-string', 'mustache', 'jinja2'."""
24
24
 
25
25
  def __init__(self, **kwargs: Any) -> None:
26
- """Create an image prompt template."""
26
+ """Create an image prompt template.
27
+
28
+ Raises:
29
+ ValueError: If the input variables contain ``'url'``, ``'path'``, or
30
+ ``'detail'``.
31
+ """
27
32
  if "input_variables" not in kwargs:
28
33
  kwargs["input_variables"] = []
29
34
 
@@ -44,7 +49,11 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
44
49
 
45
50
  @classmethod
46
51
  def get_lc_namespace(cls) -> list[str]:
47
- """Get the namespace of the langchain object."""
52
+ """Get the namespace of the langchain object.
53
+
54
+ Returns:
55
+ ``["langchain", "prompts", "image"]``
56
+ """
48
57
  return ["langchain", "prompts", "image"]
49
58
 
50
59
  def format_prompt(self, **kwargs: Any) -> PromptValue:
@@ -84,6 +93,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
84
93
  Raises:
85
94
  ValueError: If the url is not provided.
86
95
  ValueError: If the url is not a string.
96
+ ValueError: If ``'path'`` is provided in the template or kwargs.
87
97
 
88
98
  Example:
89
99
 
@@ -128,9 +138,6 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
128
138
 
129
139
  Returns:
130
140
  A formatted string.
131
-
132
- Raises:
133
- ValueError: If the path or url is not a string.
134
141
  """
135
142
  return await run_in_executor(None, self.format, **kwargs)
136
143
 
@@ -18,17 +18,15 @@ class BaseMessagePromptTemplate(Serializable, ABC):
18
18
 
19
19
  @classmethod
20
20
  def is_lc_serializable(cls) -> bool:
21
- """Return whether or not the class is serializable.
22
-
23
- Returns: True.
24
- """
21
+ """Return True as this class is serializable."""
25
22
  return True
26
23
 
27
24
  @classmethod
28
25
  def get_lc_namespace(cls) -> list[str]:
29
26
  """Get the namespace of the langchain object.
30
27
 
31
- Default namespace is ["langchain", "prompts", "chat"].
28
+ Returns:
29
+ ``["langchain", "prompts", "chat"]``
32
30
  """
33
31
  return ["langchain", "prompts", "chat"]
34
32
 
@@ -90,7 +88,8 @@ class BaseMessagePromptTemplate(Serializable, ABC):
90
88
  Returns:
91
89
  Combined prompt template.
92
90
  """
93
- from langchain_core.prompts.chat import ChatPromptTemplate
91
+ # Import locally to avoid circular import.
92
+ from langchain_core.prompts.chat import ChatPromptTemplate # noqa: PLC0415
94
93
 
95
94
  prompt = ChatPromptTemplate(messages=[self])
96
95
  return prompt + other
@@ -39,23 +39,28 @@ class PipelinePromptTemplate(BasePromptTemplate):
39
39
  This can be useful when you want to reuse parts of prompts.
40
40
 
41
41
  A PipelinePrompt consists of two main parts:
42
- - final_prompt: This is the final prompt that is returned
43
- - pipeline_prompts: This is a list of tuples, consisting
44
- of a string (`name`) and a Prompt Template.
45
- Each PromptTemplate will be formatted and then passed
46
- to future prompt templates as a variable with
47
- the same name as `name`
42
+
43
+ - final_prompt: This is the final prompt that is returned
44
+ - pipeline_prompts: This is a list of tuples, consisting
45
+ of a string (``name``) and a Prompt Template.
46
+ Each PromptTemplate will be formatted and then passed
47
+ to future prompt templates as a variable with
48
+ the same name as ``name``
48
49
 
49
50
  """
50
51
 
51
52
  final_prompt: BasePromptTemplate
52
53
  """The final prompt that is returned."""
53
54
  pipeline_prompts: list[tuple[str, BasePromptTemplate]]
54
- """A list of tuples, consisting of a string (`name`) and a Prompt Template."""
55
+ """A list of tuples, consisting of a string (``name``) and a Prompt Template."""
55
56
 
56
57
  @classmethod
57
58
  def get_lc_namespace(cls) -> list[str]:
58
- """Get the namespace of the langchain object."""
59
+ """Get the namespace of the langchain object.
60
+
61
+ Returns:
62
+ ``["langchain", "prompts", "pipeline"]``
63
+ """
59
64
  return ["langchain", "prompts", "pipeline"]
60
65
 
61
66
  @model_validator(mode="before")
@@ -69,6 +69,11 @@ class PromptTemplate(StringPromptTemplate):
69
69
  @classmethod
70
70
  @override
71
71
  def get_lc_namespace(cls) -> list[str]:
72
+ """Get the namespace of the langchain object.
73
+
74
+ Returns:
75
+ ``["langchain", "prompts", "prompt"]``
76
+ """
72
77
  return ["langchain", "prompts", "prompt"]
73
78
 
74
79
  template: str
@@ -135,14 +140,20 @@ class PromptTemplate(StringPromptTemplate):
135
140
  return mustache_schema(self.template)
136
141
 
137
142
  def __add__(self, other: Any) -> PromptTemplate:
138
- """Override the + operator to allow for combining prompt templates."""
143
+ """Override the + operator to allow for combining prompt templates.
144
+
145
+ Raises:
146
+ ValueError: If the template formats are not f-string or if there are
147
+ conflicting partial variables.
148
+ NotImplementedError: If the other object is not a ``PromptTemplate`` or str.
149
+
150
+ Returns:
151
+ A new ``PromptTemplate`` that is the combination of the two.
152
+ """
139
153
  # Allow for easy combining
140
154
  if isinstance(other, PromptTemplate):
141
- if self.template_format != "f-string":
142
- msg = "Adding prompt templates only supported for f-strings."
143
- raise ValueError(msg)
144
- if other.template_format != "f-string":
145
- msg = "Adding prompt templates only supported for f-strings."
155
+ if self.template_format != other.template_format:
156
+ msg = "Cannot add templates of different formats"
146
157
  raise ValueError(msg)
147
158
  input_variables = list(
148
159
  set(self.input_variables) | set(other.input_variables)
@@ -160,11 +171,14 @@ class PromptTemplate(StringPromptTemplate):
160
171
  template=template,
161
172
  input_variables=input_variables,
162
173
  partial_variables=partial_variables,
163
- template_format="f-string",
174
+ template_format=self.template_format,
164
175
  validate_template=validate_template,
165
176
  )
166
177
  if isinstance(other, str):
167
- prompt = PromptTemplate.from_template(other)
178
+ prompt = PromptTemplate.from_template(
179
+ other,
180
+ template_format=self.template_format,
181
+ )
168
182
  return self + prompt
169
183
  msg = f"Unsupported operand type for +: {type(other)}"
170
184
  raise NotImplementedError(msg)
@@ -15,6 +15,14 @@ from langchain_core.utils import get_colored_text, mustache
15
15
  from langchain_core.utils.formatting import formatter
16
16
  from langchain_core.utils.interactive_env import is_interactive_env
17
17
 
18
+ try:
19
+ from jinja2 import Environment, meta
20
+ from jinja2.sandbox import SandboxedEnvironment
21
+
22
+ _HAS_JINJA2 = True
23
+ except ImportError:
24
+ _HAS_JINJA2 = False
25
+
18
26
  PromptTemplateFormat = Literal["f-string", "mustache", "jinja2"]
19
27
 
20
28
 
@@ -40,9 +48,7 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
40
48
  Raises:
41
49
  ImportError: If jinja2 is not installed.
42
50
  """
43
- try:
44
- from jinja2.sandbox import SandboxedEnvironment
45
- except ImportError as e:
51
+ if not _HAS_JINJA2:
46
52
  msg = (
47
53
  "jinja2 not installed, which is needed to use the jinja2_formatter. "
48
54
  "Please install it with `pip install jinja2`."
@@ -50,7 +56,7 @@ def jinja2_formatter(template: str, /, **kwargs: Any) -> str:
50
56
  "Do not expand jinja2 templates using unverified or user-controlled "
51
57
  "inputs as that can result in arbitrary Python code execution."
52
58
  )
53
- raise ImportError(msg) from e
59
+ raise ImportError(msg)
54
60
 
55
61
  # This uses a sandboxed environment to prevent arbitrary code execution.
56
62
  # Jinja2 uses an opt-out rather than opt-in approach for sand-boxing.
@@ -88,14 +94,12 @@ def validate_jinja2(template: str, input_variables: list[str]) -> None:
88
94
 
89
95
 
90
96
  def _get_jinja2_variables_from_template(template: str) -> set[str]:
91
- try:
92
- from jinja2 import Environment, meta
93
- except ImportError as e:
97
+ if not _HAS_JINJA2:
94
98
  msg = (
95
99
  "jinja2 not installed, which is needed to use the jinja2_formatter. "
96
100
  "Please install it with `pip install jinja2`."
97
101
  )
98
- raise ImportError(msg) from e
102
+ raise ImportError(msg)
99
103
  env = Environment() # noqa: S701
100
104
  ast = env.parse(template)
101
105
  return meta.find_undeclared_variables(ast)
@@ -166,7 +170,7 @@ def mustache_schema(
166
170
  prefix = section_stack.pop()
167
171
  elif type_ in {"section", "inverted section"}:
168
172
  section_stack.append(prefix)
169
- prefix = prefix + tuple(key.split("."))
173
+ prefix += tuple(key.split("."))
170
174
  fields[prefix] = False
171
175
  elif type_ in {"variable", "no escape"}:
172
176
  fields[prefix + tuple(key.split("."))] = True
@@ -268,7 +272,11 @@ class StringPromptTemplate(BasePromptTemplate, ABC):
268
272
 
269
273
  @classmethod
270
274
  def get_lc_namespace(cls) -> list[str]:
271
- """Get the namespace of the langchain object."""
275
+ """Get the namespace of the langchain object.
276
+
277
+ Returns:
278
+ ``["langchain", "prompts", "base"]``
279
+ """
272
280
  return ["langchain", "prompts", "base"]
273
281
 
274
282
  def format_prompt(self, **kwargs: Any) -> PromptValue: