langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; see the package registry's advisory page for more details.

Files changed (119):
  1. langchain_core/_api/beta_decorator.py +22 -44
  2. langchain_core/_api/deprecation.py +30 -17
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +28 -15
  8. langchain_core/callbacks/manager.py +83 -71
  9. langchain_core/callbacks/usage.py +6 -4
  10. langchain_core/chat_history.py +29 -21
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +4 -1
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +88 -76
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +98 -27
  26. langchain_core/language_models/fake_chat_models.py +10 -9
  27. langchain_core/language_models/llms.py +52 -18
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +39 -44
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +53 -24
  33. langchain_core/messages/base.py +43 -22
  34. langchain_core/messages/chat.py +4 -1
  35. langchain_core/messages/content_blocks.py +23 -2
  36. langchain_core/messages/function.py +9 -5
  37. langchain_core/messages/human.py +13 -10
  38. langchain_core/messages/modifier.py +1 -0
  39. langchain_core/messages/system.py +11 -8
  40. langchain_core/messages/tool.py +60 -29
  41. langchain_core/messages/utils.py +250 -131
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +6 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/xml.py +19 -19
  50. langchain_core/outputs/chat_generation.py +25 -10
  51. langchain_core/outputs/generation.py +14 -3
  52. langchain_core/outputs/llm_result.py +8 -1
  53. langchain_core/prompt_values.py +16 -6
  54. langchain_core/prompts/base.py +4 -9
  55. langchain_core/prompts/chat.py +89 -57
  56. langchain_core/prompts/dict.py +16 -8
  57. langchain_core/prompts/few_shot.py +12 -11
  58. langchain_core/prompts/few_shot_with_templates.py +5 -1
  59. langchain_core/prompts/image.py +12 -5
  60. langchain_core/prompts/message.py +5 -6
  61. langchain_core/prompts/pipeline.py +13 -8
  62. langchain_core/prompts/prompt.py +22 -8
  63. langchain_core/prompts/string.py +18 -10
  64. langchain_core/prompts/structured.py +7 -2
  65. langchain_core/rate_limiters.py +2 -2
  66. langchain_core/retrievers.py +7 -6
  67. langchain_core/runnables/base.py +406 -186
  68. langchain_core/runnables/branch.py +14 -19
  69. langchain_core/runnables/config.py +9 -15
  70. langchain_core/runnables/configurable.py +34 -19
  71. langchain_core/runnables/fallbacks.py +20 -13
  72. langchain_core/runnables/graph.py +48 -38
  73. langchain_core/runnables/graph_ascii.py +41 -18
  74. langchain_core/runnables/graph_mermaid.py +54 -25
  75. langchain_core/runnables/graph_png.py +27 -31
  76. langchain_core/runnables/history.py +55 -58
  77. langchain_core/runnables/passthrough.py +44 -21
  78. langchain_core/runnables/retry.py +44 -23
  79. langchain_core/runnables/router.py +9 -8
  80. langchain_core/runnables/schema.py +2 -0
  81. langchain_core/runnables/utils.py +51 -89
  82. langchain_core/stores.py +19 -31
  83. langchain_core/sys_info.py +9 -8
  84. langchain_core/tools/base.py +37 -28
  85. langchain_core/tools/convert.py +26 -15
  86. langchain_core/tools/simple.py +36 -8
  87. langchain_core/tools/structured.py +25 -12
  88. langchain_core/tracers/base.py +2 -2
  89. langchain_core/tracers/context.py +5 -1
  90. langchain_core/tracers/core.py +109 -39
  91. langchain_core/tracers/evaluation.py +22 -26
  92. langchain_core/tracers/event_stream.py +45 -34
  93. langchain_core/tracers/langchain.py +12 -3
  94. langchain_core/tracers/langchain_v1.py +10 -2
  95. langchain_core/tracers/log_stream.py +56 -17
  96. langchain_core/tracers/root_listeners.py +4 -20
  97. langchain_core/tracers/run_collector.py +6 -16
  98. langchain_core/tracers/schemas.py +5 -1
  99. langchain_core/utils/aiter.py +15 -7
  100. langchain_core/utils/env.py +3 -0
  101. langchain_core/utils/function_calling.py +50 -28
  102. langchain_core/utils/interactive_env.py +6 -2
  103. langchain_core/utils/iter.py +12 -4
  104. langchain_core/utils/json.py +12 -3
  105. langchain_core/utils/json_schema.py +156 -40
  106. langchain_core/utils/loading.py +5 -1
  107. langchain_core/utils/mustache.py +24 -15
  108. langchain_core/utils/pydantic.py +38 -9
  109. langchain_core/utils/utils.py +25 -9
  110. langchain_core/vectorstores/base.py +7 -20
  111. langchain_core/vectorstores/in_memory.py +23 -17
  112. langchain_core/vectorstores/utils.py +18 -12
  113. langchain_core/version.py +1 -1
  114. langchain_core-0.3.77.dist-info/METADATA +67 -0
  115. langchain_core-0.3.77.dist-info/RECORD +174 -0
  116. langchain_core-0.3.75.dist-info/METADATA +0 -106
  117. langchain_core-0.3.75.dist-info/RECORD +0 -174
  118. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
  119. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
@@ -67,10 +67,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
67
67
  from langchain_core.prompts import MessagesPlaceholder
68
68
 
69
69
  prompt = MessagesPlaceholder("history")
70
- prompt.format_messages() # raises KeyError
70
+ prompt.format_messages() # raises KeyError
71
71
 
72
72
  prompt = MessagesPlaceholder("history", optional=True)
73
- prompt.format_messages() # returns empty list []
73
+ prompt.format_messages() # returns empty list []
74
74
 
75
75
  prompt.format_messages(
76
76
  history=[
@@ -93,14 +93,14 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
93
93
  [
94
94
  ("system", "You are a helpful assistant."),
95
95
  MessagesPlaceholder("history"),
96
- ("human", "{question}")
96
+ ("human", "{question}"),
97
97
  ]
98
98
  )
99
99
  prompt.invoke(
100
- {
101
- "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
- "question": "now multiply that by 4"
103
- }
100
+ {
101
+ "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
102
+ "question": "now multiply that by 4",
103
+ }
104
104
  )
105
105
  # -> ChatPromptValue(messages=[
106
106
  # SystemMessage(content="You are a helpful assistant."),
@@ -262,7 +262,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
262
262
  def from_template_file(
263
263
  cls,
264
264
  template_file: Union[str, Path],
265
- input_variables: list[str],
265
+ input_variables: list[str], # noqa: ARG003 # Deprecated
266
266
  **kwargs: Any,
267
267
  ) -> Self:
268
268
  """Create a class from a template file.
@@ -275,7 +275,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
275
275
  Returns:
276
276
  A new instance of this class.
277
277
  """
278
- prompt = PromptTemplate.from_file(template_file, input_variables)
278
+ prompt = PromptTemplate.from_file(template_file)
279
279
  return cls(prompt=prompt, **kwargs)
280
280
 
281
281
  @abstractmethod
@@ -740,10 +740,18 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
740
740
 
741
741
  @abstractmethod
742
742
  def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
743
- """Format kwargs into a list of messages."""
743
+ """Format kwargs into a list of messages.
744
+
745
+ Returns:
746
+ List of messages.
747
+ """
744
748
 
745
749
  async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
746
- """Async format kwargs into a list of messages."""
750
+ """Async format kwargs into a list of messages.
751
+
752
+ Returns:
753
+ List of messages.
754
+ """
747
755
  return self.format_messages(**kwargs)
748
756
 
749
757
  def pretty_repr(
@@ -795,17 +803,19 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
795
803
 
796
804
  from langchain_core.prompts import ChatPromptTemplate
797
805
 
798
- template = ChatPromptTemplate([
799
- ("system", "You are a helpful AI bot. Your name is {name}."),
800
- ("human", "Hello, how are you doing?"),
801
- ("ai", "I'm doing well, thanks!"),
802
- ("human", "{user_input}"),
803
- ])
806
+ template = ChatPromptTemplate(
807
+ [
808
+ ("system", "You are a helpful AI bot. Your name is {name}."),
809
+ ("human", "Hello, how are you doing?"),
810
+ ("ai", "I'm doing well, thanks!"),
811
+ ("human", "{user_input}"),
812
+ ]
813
+ )
804
814
 
805
815
  prompt_value = template.invoke(
806
816
  {
807
817
  "name": "Bob",
808
- "user_input": "What is your name?"
818
+ "user_input": "What is your name?",
809
819
  }
810
820
  )
811
821
  # Output:
@@ -816,7 +826,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
816
826
  # AIMessage(content="I'm doing well, thanks!"),
817
827
  # HumanMessage(content='What is your name?')
818
828
  # ]
819
- #)
829
+ # )
820
830
 
821
831
  Messages Placeholder:
822
832
 
@@ -826,14 +836,16 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
826
836
  # you can initialize the template with a MessagesPlaceholder
827
837
  # either using the class directly or with the shorthand tuple syntax:
828
838
 
829
- template = ChatPromptTemplate([
830
- ("system", "You are a helpful AI bot."),
831
- # Means the template will receive an optional list of messages under
832
- # the "conversation" key
833
- ("placeholder", "{conversation}")
834
- # Equivalently:
835
- # MessagesPlaceholder(variable_name="conversation", optional=True)
836
- ])
839
+ template = ChatPromptTemplate(
840
+ [
841
+ ("system", "You are a helpful AI bot."),
842
+ # Means the template will receive an optional list of messages under
843
+ # the "conversation" key
844
+ ("placeholder", "{conversation}"),
845
+ # Equivalently:
846
+ # MessagesPlaceholder(variable_name="conversation", optional=True)
847
+ ]
848
+ )
837
849
 
838
850
  prompt_value = template.invoke(
839
851
  {
@@ -841,7 +853,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
841
853
  ("human", "Hi!"),
842
854
  ("ai", "How can I assist you today?"),
843
855
  ("human", "Can you make me an ice cream sundae?"),
844
- ("ai", "No.")
856
+ ("ai", "No."),
845
857
  ]
846
858
  }
847
859
  )
@@ -855,7 +867,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
855
867
  # HumanMessage(content='Can you make me an ice cream sundae?'),
856
868
  # AIMessage(content='No.'),
857
869
  # ]
858
- #)
870
+ # )
859
871
 
860
872
  Single-variable template:
861
873
 
@@ -868,10 +880,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
868
880
 
869
881
  from langchain_core.prompts import ChatPromptTemplate
870
882
 
871
- template = ChatPromptTemplate([
872
- ("system", "You are a helpful AI bot. Your name is Carl."),
873
- ("human", "{user_input}"),
874
- ])
883
+ template = ChatPromptTemplate(
884
+ [
885
+ ("system", "You are a helpful AI bot. Your name is Carl."),
886
+ ("human", "{user_input}"),
887
+ ]
888
+ )
875
889
 
876
890
  prompt_value = template.invoke("Hello, there!")
877
891
  # Equivalent to
@@ -922,28 +936,29 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
922
936
  input_types: A dictionary of the types of the variables the prompt template
923
937
  expects. If not provided, all variables are assumed to be strings.
924
938
 
925
- Returns:
926
- A chat prompt template.
927
-
928
939
  Examples:
929
940
  Instantiation from a list of message templates:
930
941
 
931
942
  .. code-block:: python
932
943
 
933
- template = ChatPromptTemplate([
934
- ("human", "Hello, how are you?"),
935
- ("ai", "I'm doing well, thanks!"),
936
- ("human", "That's good to hear."),
937
- ])
944
+ template = ChatPromptTemplate(
945
+ [
946
+ ("human", "Hello, how are you?"),
947
+ ("ai", "I'm doing well, thanks!"),
948
+ ("human", "That's good to hear."),
949
+ ]
950
+ )
938
951
 
939
952
  Instantiation from mixed message formats:
940
953
 
941
954
  .. code-block:: python
942
955
 
943
- template = ChatPromptTemplate([
944
- SystemMessage(content="hello"),
945
- ("human", "Hello, how are you?"),
946
- ])
956
+ template = ChatPromptTemplate(
957
+ [
958
+ SystemMessage(content="hello"),
959
+ ("human", "Hello, how are you?"),
960
+ ]
961
+ )
947
962
 
948
963
  """
949
964
  messages_ = [
@@ -974,7 +989,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
974
989
 
975
990
  @classmethod
976
991
  def get_lc_namespace(cls) -> list[str]:
977
- """Get the namespace of the langchain object."""
992
+ """Get the namespace of the langchain object.
993
+
994
+ Returns:
995
+ ``["langchain", "prompts", "chat"]``
996
+ """
978
997
  return ["langchain", "prompts", "chat"]
979
998
 
980
999
  def __add__(self, other: Any) -> ChatPromptTemplate:
@@ -1137,20 +1156,24 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1137
1156
 
1138
1157
  .. code-block:: python
1139
1158
 
1140
- template = ChatPromptTemplate.from_messages([
1141
- ("human", "Hello, how are you?"),
1142
- ("ai", "I'm doing well, thanks!"),
1143
- ("human", "That's good to hear."),
1144
- ])
1159
+ template = ChatPromptTemplate.from_messages(
1160
+ [
1161
+ ("human", "Hello, how are you?"),
1162
+ ("ai", "I'm doing well, thanks!"),
1163
+ ("human", "That's good to hear."),
1164
+ ]
1165
+ )
1145
1166
 
1146
1167
  Instantiation from mixed message formats:
1147
1168
 
1148
1169
  .. code-block:: python
1149
1170
 
1150
- template = ChatPromptTemplate.from_messages([
1151
- SystemMessage(content="hello"),
1152
- ("human", "Hello, how are you?"),
1153
- ])
1171
+ template = ChatPromptTemplate.from_messages(
1172
+ [
1173
+ SystemMessage(content="hello"),
1174
+ ("human", "Hello, how are you?"),
1175
+ ]
1176
+ )
1154
1177
 
1155
1178
  Args:
1156
1179
  messages: sequence of message representations.
@@ -1174,6 +1197,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1174
1197
  **kwargs: keyword arguments to use for filling in template variables
1175
1198
  in all the template messages in this chat template.
1176
1199
 
1200
+ Raises:
1201
+ ValueError: if messages are of unexpected types.
1202
+
1177
1203
  Returns:
1178
1204
  list of formatted messages.
1179
1205
  """
@@ -1284,7 +1310,13 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1284
1310
  def __getitem__(
1285
1311
  self, index: Union[int, slice]
1286
1312
  ) -> Union[MessageLike, ChatPromptTemplate]:
1287
- """Use to index into the chat template."""
1313
+ """Use to index into the chat template.
1314
+
1315
+ Returns:
1316
+ If index is an int, returns the message at that index.
1317
+ If index is a slice, returns a new ``ChatPromptTemplate``
1318
+ containing the messages in that slice.
1319
+ """
1288
1320
  if isinstance(index, slice):
1289
1321
  start, stop, step = index.indices(len(self.messages))
1290
1322
  messages = self.messages[start:stop:step]
@@ -1292,7 +1324,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1292
1324
  return self.messages[index]
1293
1325
 
1294
1326
  def __len__(self) -> int:
1295
- """Get the length of the chat template."""
1327
+ """Return the length of the chat template."""
1296
1328
  return len(self.messages)
1297
1329
 
1298
1330
  @property
@@ -31,18 +31,25 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
31
31
  return _get_input_variables(self.template, self.template_format)
32
32
 
33
33
  def format(self, **kwargs: Any) -> dict[str, Any]:
34
- """Format the prompt with the inputs."""
34
+ """Format the prompt with the inputs.
35
+
36
+ Returns:
37
+ A formatted dict.
38
+ """
35
39
  return _insert_input_variables(self.template, kwargs, self.template_format)
36
40
 
37
41
  async def aformat(self, **kwargs: Any) -> dict[str, Any]:
38
- """Format the prompt with the inputs."""
42
+ """Format the prompt with the inputs.
43
+
44
+ Returns:
45
+ A formatted dict.
46
+ """
39
47
  return self.format(**kwargs)
40
48
 
41
49
  @override
42
50
  def invoke(
43
51
  self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
44
52
  ) -> dict:
45
- """Invoke the prompt."""
46
53
  return self._call_with_config(
47
54
  lambda x: self.format(**x),
48
55
  input,
@@ -62,15 +69,16 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
62
69
 
63
70
  @classmethod
64
71
  def is_lc_serializable(cls) -> bool:
65
- """Return whether or not the class is serializable.
66
-
67
- Returns: True.
68
- """
72
+ """Return True as this class is serializable."""
69
73
  return True
70
74
 
71
75
  @classmethod
72
76
  def get_lc_namespace(cls) -> list[str]:
73
- """Serialization namespace."""
77
+ """Get the namespace of the langchain object.
78
+
79
+ Returns:
80
+ ``["langchain_core", "prompts", "dict"]``
81
+ """
74
82
  return ["langchain_core", "prompts", "dict"]
75
83
 
76
84
  def pretty_repr(self, *, html: bool = False) -> str:
@@ -117,7 +117,7 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
117
117
 
118
118
  @classmethod
119
119
  def is_lc_serializable(cls) -> bool:
120
- """Return whether or not the class is serializable."""
120
+ """Return False as this class is not serializable."""
121
121
  return False
122
122
 
123
123
  validate_template: bool = False
@@ -153,7 +153,7 @@ class FewShotPromptTemplate(_FewShotPromptTemplateMixin, StringPromptTemplate):
153
153
  self.template_format,
154
154
  self.input_variables + list(self.partial_variables),
155
155
  )
156
- elif self.template_format or None:
156
+ elif self.template_format:
157
157
  self.input_variables = [
158
158
  var
159
159
  for var in get_template_variables(
@@ -272,7 +272,7 @@ class FewShotChatMessagePromptTemplate(
272
272
 
273
273
  from langchain_core.prompts import (
274
274
  FewShotChatMessagePromptTemplate,
275
- ChatPromptTemplate
275
+ ChatPromptTemplate,
276
276
  )
277
277
 
278
278
  examples = [
@@ -281,7 +281,10 @@ class FewShotChatMessagePromptTemplate(
281
281
  ]
282
282
 
283
283
  example_prompt = ChatPromptTemplate.from_messages(
284
- [('human', 'What is {input}?'), ('ai', '{output}')]
284
+ [
285
+ ("human", "What is {input}?"),
286
+ ("ai", "{output}"),
287
+ ]
285
288
  )
286
289
 
287
290
  few_shot_prompt = FewShotChatMessagePromptTemplate(
@@ -292,9 +295,9 @@ class FewShotChatMessagePromptTemplate(
292
295
 
293
296
  final_prompt = ChatPromptTemplate.from_messages(
294
297
  [
295
- ('system', 'You are a helpful AI Assistant'),
298
+ ("system", "You are a helpful AI Assistant"),
296
299
  few_shot_prompt,
297
- ('human', '{input}'),
300
+ ("human", "{input}"),
298
301
  ]
299
302
  )
300
303
  final_prompt.format(input="What is 4+4?")
@@ -314,10 +317,7 @@ class FewShotChatMessagePromptTemplate(
314
317
  # ...
315
318
  ]
316
319
 
317
- to_vectorize = [
318
- " ".join(example.values())
319
- for example in examples
320
- ]
320
+ to_vectorize = [" ".join(example.values()) for example in examples]
321
321
  embeddings = OpenAIEmbeddings()
322
322
  vectorstore = Chroma.from_texts(
323
323
  to_vectorize, embeddings, metadatas=examples
@@ -355,6 +355,7 @@ class FewShotChatMessagePromptTemplate(
355
355
 
356
356
  # Use within an LLM
357
357
  from langchain_core.chat_models import ChatAnthropic
358
+
358
359
  chain = final_prompt | ChatAnthropic(model="claude-3-haiku-20240307")
359
360
  chain.invoke({"input": "What's 3+3?"})
360
361
 
@@ -369,7 +370,7 @@ class FewShotChatMessagePromptTemplate(
369
370
 
370
371
  @classmethod
371
372
  def is_lc_serializable(cls) -> bool:
372
- """Return whether or not the class is serializable."""
373
+ """Return False as this class is not serializable."""
373
374
  return False
374
375
 
375
376
  model_config = ConfigDict(
@@ -46,7 +46,11 @@ class FewShotPromptWithTemplates(StringPromptTemplate):
46
46
 
47
47
  @classmethod
48
48
  def get_lc_namespace(cls) -> list[str]:
49
- """Get the namespace of the langchain object."""
49
+ """Get the namespace of the langchain object.
50
+
51
+ Returns:
52
+ ``["langchain", "prompts", "few_shot_with_templates"]``
53
+ """
50
54
  return ["langchain", "prompts", "few_shot_with_templates"]
51
55
 
52
56
  @model_validator(mode="before")
@@ -23,7 +23,12 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
23
23
  Options are: 'f-string', 'mustache', 'jinja2'."""
24
24
 
25
25
  def __init__(self, **kwargs: Any) -> None:
26
- """Create an image prompt template."""
26
+ """Create an image prompt template.
27
+
28
+ Raises:
29
+ ValueError: If the input variables contain ``'url'``, ``'path'``, or
30
+ ``'detail'``.
31
+ """
27
32
  if "input_variables" not in kwargs:
28
33
  kwargs["input_variables"] = []
29
34
 
@@ -44,7 +49,11 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
44
49
 
45
50
  @classmethod
46
51
  def get_lc_namespace(cls) -> list[str]:
47
- """Get the namespace of the langchain object."""
52
+ """Get the namespace of the langchain object.
53
+
54
+ Returns:
55
+ ``["langchain", "prompts", "image"]``
56
+ """
48
57
  return ["langchain", "prompts", "image"]
49
58
 
50
59
  def format_prompt(self, **kwargs: Any) -> PromptValue:
@@ -84,6 +93,7 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
84
93
  Raises:
85
94
  ValueError: If the url is not provided.
86
95
  ValueError: If the url is not a string.
96
+ ValueError: If ``'path'`` is provided in the template or kwargs.
87
97
 
88
98
  Example:
89
99
 
@@ -128,9 +138,6 @@ class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
128
138
 
129
139
  Returns:
130
140
  A formatted string.
131
-
132
- Raises:
133
- ValueError: If the path or url is not a string.
134
141
  """
135
142
  return await run_in_executor(None, self.format, **kwargs)
136
143
 
@@ -18,17 +18,15 @@ class BaseMessagePromptTemplate(Serializable, ABC):
18
18
 
19
19
  @classmethod
20
20
  def is_lc_serializable(cls) -> bool:
21
- """Return whether or not the class is serializable.
22
-
23
- Returns: True.
24
- """
21
+ """Return True as this class is serializable."""
25
22
  return True
26
23
 
27
24
  @classmethod
28
25
  def get_lc_namespace(cls) -> list[str]:
29
26
  """Get the namespace of the langchain object.
30
27
 
31
- Default namespace is ["langchain", "prompts", "chat"].
28
+ Returns:
29
+ ``["langchain", "prompts", "chat"]``
32
30
  """
33
31
  return ["langchain", "prompts", "chat"]
34
32
 
@@ -90,7 +88,8 @@ class BaseMessagePromptTemplate(Serializable, ABC):
90
88
  Returns:
91
89
  Combined prompt template.
92
90
  """
93
- from langchain_core.prompts.chat import ChatPromptTemplate
91
+ # Import locally to avoid circular import.
92
+ from langchain_core.prompts.chat import ChatPromptTemplate # noqa: PLC0415
94
93
 
95
94
  prompt = ChatPromptTemplate(messages=[self])
96
95
  return prompt + other
@@ -39,23 +39,28 @@ class PipelinePromptTemplate(BasePromptTemplate):
39
39
  This can be useful when you want to reuse parts of prompts.
40
40
 
41
41
  A PipelinePrompt consists of two main parts:
42
- - final_prompt: This is the final prompt that is returned
43
- - pipeline_prompts: This is a list of tuples, consisting
44
- of a string (`name`) and a Prompt Template.
45
- Each PromptTemplate will be formatted and then passed
46
- to future prompt templates as a variable with
47
- the same name as `name`
42
+
43
+ - final_prompt: This is the final prompt that is returned
44
+ - pipeline_prompts: This is a list of tuples, consisting
45
+ of a string (``name``) and a Prompt Template.
46
+ Each PromptTemplate will be formatted and then passed
47
+ to future prompt templates as a variable with
48
+ the same name as ``name``
48
49
 
49
50
  """
50
51
 
51
52
  final_prompt: BasePromptTemplate
52
53
  """The final prompt that is returned."""
53
54
  pipeline_prompts: list[tuple[str, BasePromptTemplate]]
54
- """A list of tuples, consisting of a string (`name`) and a Prompt Template."""
55
+ """A list of tuples, consisting of a string (``name``) and a Prompt Template."""
55
56
 
56
57
  @classmethod
57
58
  def get_lc_namespace(cls) -> list[str]:
58
- """Get the namespace of the langchain object."""
59
+ """Get the namespace of the langchain object.
60
+
61
+ Returns:
62
+ ``["langchain", "prompts", "pipeline"]``
63
+ """
59
64
  return ["langchain", "prompts", "pipeline"]
60
65
 
61
66
  @model_validator(mode="before")
@@ -69,6 +69,11 @@ class PromptTemplate(StringPromptTemplate):
69
69
  @classmethod
70
70
  @override
71
71
  def get_lc_namespace(cls) -> list[str]:
72
+ """Get the namespace of the langchain object.
73
+
74
+ Returns:
75
+ ``["langchain", "prompts", "prompt"]``
76
+ """
72
77
  return ["langchain", "prompts", "prompt"]
73
78
 
74
79
  template: str
@@ -135,14 +140,20 @@ class PromptTemplate(StringPromptTemplate):
135
140
  return mustache_schema(self.template)
136
141
 
137
142
  def __add__(self, other: Any) -> PromptTemplate:
138
- """Override the + operator to allow for combining prompt templates."""
143
+ """Override the + operator to allow for combining prompt templates.
144
+
145
+ Raises:
146
+ ValueError: If the template formats are not f-string or if there are
147
+ conflicting partial variables.
148
+ NotImplementedError: If the other object is not a ``PromptTemplate`` or str.
149
+
150
+ Returns:
151
+ A new ``PromptTemplate`` that is the combination of the two.
152
+ """
139
153
  # Allow for easy combining
140
154
  if isinstance(other, PromptTemplate):
141
- if self.template_format != "f-string":
142
- msg = "Adding prompt templates only supported for f-strings."
143
- raise ValueError(msg)
144
- if other.template_format != "f-string":
145
- msg = "Adding prompt templates only supported for f-strings."
155
+ if self.template_format != other.template_format:
156
+ msg = "Cannot add templates of different formats"
146
157
  raise ValueError(msg)
147
158
  input_variables = list(
148
159
  set(self.input_variables) | set(other.input_variables)
@@ -160,11 +171,14 @@ class PromptTemplate(StringPromptTemplate):
160
171
  template=template,
161
172
  input_variables=input_variables,
162
173
  partial_variables=partial_variables,
163
- template_format="f-string",
174
+ template_format=self.template_format,
164
175
  validate_template=validate_template,
165
176
  )
166
177
  if isinstance(other, str):
167
- prompt = PromptTemplate.from_template(other)
178
+ prompt = PromptTemplate.from_template(
179
+ other,
180
+ template_format=self.template_format,
181
+ )
168
182
  return self + prompt
169
183
  msg = f"Unsupported operand type for +: {type(other)}"
170
184
  raise NotImplementedError(msg)