langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (142)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +10 -9
  7. langchain_core/caches.py +46 -56
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +43 -58
  16. langchain_core/document_loaders/base.py +21 -21
  17. langchain_core/document_loaders/langsmith.py +22 -22
  18. langchain_core/documents/__init__.py +0 -1
  19. langchain_core/documents/base.py +46 -49
  20. langchain_core/documents/transformers.py +28 -29
  21. langchain_core/embeddings/fake.py +50 -54
  22. langchain_core/example_selectors/semantic_similarity.py +4 -6
  23. langchain_core/exceptions.py +7 -8
  24. langchain_core/indexing/api.py +19 -25
  25. langchain_core/indexing/base.py +24 -24
  26. langchain_core/language_models/__init__.py +11 -27
  27. langchain_core/language_models/_utils.py +53 -54
  28. langchain_core/language_models/base.py +30 -24
  29. langchain_core/language_models/chat_models.py +123 -148
  30. langchain_core/language_models/fake_chat_models.py +7 -7
  31. langchain_core/language_models/llms.py +14 -16
  32. langchain_core/load/dump.py +3 -4
  33. langchain_core/load/load.py +7 -16
  34. langchain_core/load/serializable.py +37 -36
  35. langchain_core/messages/__init__.py +1 -16
  36. langchain_core/messages/ai.py +122 -123
  37. langchain_core/messages/base.py +31 -31
  38. langchain_core/messages/block_translators/__init__.py +17 -17
  39. langchain_core/messages/block_translators/anthropic.py +3 -3
  40. langchain_core/messages/block_translators/bedrock_converse.py +3 -3
  41. langchain_core/messages/block_translators/google_genai.py +5 -4
  42. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  43. langchain_core/messages/block_translators/groq.py +117 -21
  44. langchain_core/messages/block_translators/langchain_v0.py +3 -3
  45. langchain_core/messages/block_translators/openai.py +5 -5
  46. langchain_core/messages/chat.py +2 -6
  47. langchain_core/messages/content.py +222 -209
  48. langchain_core/messages/function.py +6 -10
  49. langchain_core/messages/human.py +17 -24
  50. langchain_core/messages/modifier.py +2 -2
  51. langchain_core/messages/system.py +12 -22
  52. langchain_core/messages/tool.py +53 -69
  53. langchain_core/messages/utils.py +399 -417
  54. langchain_core/output_parsers/__init__.py +1 -14
  55. langchain_core/output_parsers/base.py +46 -47
  56. langchain_core/output_parsers/json.py +3 -4
  57. langchain_core/output_parsers/list.py +2 -2
  58. langchain_core/output_parsers/openai_functions.py +46 -44
  59. langchain_core/output_parsers/openai_tools.py +11 -16
  60. langchain_core/output_parsers/pydantic.py +10 -11
  61. langchain_core/output_parsers/string.py +2 -2
  62. langchain_core/output_parsers/transform.py +2 -2
  63. langchain_core/output_parsers/xml.py +1 -1
  64. langchain_core/outputs/__init__.py +1 -1
  65. langchain_core/outputs/chat_generation.py +14 -14
  66. langchain_core/outputs/generation.py +6 -6
  67. langchain_core/outputs/llm_result.py +5 -5
  68. langchain_core/prompt_values.py +11 -11
  69. langchain_core/prompts/__init__.py +3 -23
  70. langchain_core/prompts/base.py +33 -38
  71. langchain_core/prompts/chat.py +222 -229
  72. langchain_core/prompts/dict.py +3 -3
  73. langchain_core/prompts/few_shot.py +76 -83
  74. langchain_core/prompts/few_shot_with_templates.py +7 -9
  75. langchain_core/prompts/image.py +12 -14
  76. langchain_core/prompts/loading.py +1 -1
  77. langchain_core/prompts/message.py +3 -3
  78. langchain_core/prompts/prompt.py +20 -23
  79. langchain_core/prompts/string.py +20 -8
  80. langchain_core/prompts/structured.py +26 -27
  81. langchain_core/rate_limiters.py +50 -58
  82. langchain_core/retrievers.py +41 -182
  83. langchain_core/runnables/base.py +565 -597
  84. langchain_core/runnables/branch.py +8 -8
  85. langchain_core/runnables/config.py +37 -44
  86. langchain_core/runnables/configurable.py +9 -10
  87. langchain_core/runnables/fallbacks.py +9 -9
  88. langchain_core/runnables/graph.py +46 -50
  89. langchain_core/runnables/graph_ascii.py +19 -18
  90. langchain_core/runnables/graph_mermaid.py +20 -31
  91. langchain_core/runnables/graph_png.py +7 -7
  92. langchain_core/runnables/history.py +22 -22
  93. langchain_core/runnables/passthrough.py +11 -11
  94. langchain_core/runnables/retry.py +3 -3
  95. langchain_core/runnables/router.py +2 -2
  96. langchain_core/runnables/schema.py +33 -33
  97. langchain_core/runnables/utils.py +30 -34
  98. langchain_core/stores.py +72 -102
  99. langchain_core/sys_info.py +27 -29
  100. langchain_core/tools/__init__.py +1 -14
  101. langchain_core/tools/base.py +70 -71
  102. langchain_core/tools/convert.py +100 -104
  103. langchain_core/tools/render.py +9 -9
  104. langchain_core/tools/retriever.py +7 -7
  105. langchain_core/tools/simple.py +6 -7
  106. langchain_core/tools/structured.py +18 -24
  107. langchain_core/tracers/__init__.py +1 -9
  108. langchain_core/tracers/base.py +35 -35
  109. langchain_core/tracers/context.py +12 -17
  110. langchain_core/tracers/event_stream.py +3 -3
  111. langchain_core/tracers/langchain.py +8 -8
  112. langchain_core/tracers/log_stream.py +17 -18
  113. langchain_core/tracers/memory_stream.py +3 -3
  114. langchain_core/tracers/root_listeners.py +2 -2
  115. langchain_core/tracers/schemas.py +0 -129
  116. langchain_core/tracers/stdout.py +1 -2
  117. langchain_core/utils/__init__.py +1 -1
  118. langchain_core/utils/aiter.py +32 -32
  119. langchain_core/utils/env.py +5 -5
  120. langchain_core/utils/function_calling.py +59 -154
  121. langchain_core/utils/html.py +4 -4
  122. langchain_core/utils/input.py +3 -3
  123. langchain_core/utils/interactive_env.py +1 -1
  124. langchain_core/utils/iter.py +20 -20
  125. langchain_core/utils/json.py +1 -1
  126. langchain_core/utils/json_schema.py +2 -2
  127. langchain_core/utils/mustache.py +5 -5
  128. langchain_core/utils/pydantic.py +17 -17
  129. langchain_core/utils/strings.py +5 -5
  130. langchain_core/utils/utils.py +25 -28
  131. langchain_core/vectorstores/base.py +55 -87
  132. langchain_core/vectorstores/in_memory.py +83 -85
  133. langchain_core/vectorstores/utils.py +2 -2
  134. langchain_core/version.py +1 -1
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
  136. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  137. langchain_core/memory.py +0 -120
  138. langchain_core/pydantic_v1/__init__.py +0 -30
  139. langchain_core/pydantic_v1/dataclasses.py +0 -23
  140. langchain_core/pydantic_v1/main.py +0 -23
  141. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  142. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
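
Two themes run through the file list above: the legacy `langchain_core.pydantic_v1` shim and `langchain_core/memory.py` are removed outright, and docstrings across the package move from reStructuredText `.. code-block::` directives to Markdown fences (see the `prompts/chat.py` diff below). The snippet that follows is a minimal, hypothetical sketch of the import migration implied by removing the shim; the `SearchInput` model is illustrative only and is not part of langchain-core.

```python
# Before (shim removed in this release): downstream code sometimes imported
# BaseModel/Field through the compatibility module.
# from langchain_core.pydantic_v1 import BaseModel, Field

# After: depend on pydantic directly.
from pydantic import BaseModel, Field


class SearchInput(BaseModel):
    """Illustrative input schema for a downstream tool (not a langchain-core API)."""

    query: str = Field(..., description="Search query text")
    limit: int = Field(default=10, description="Maximum number of results")
```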
@@ -59,84 +59,83 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
  
  Direct usage:
  
- .. code-block:: python
+ ```python
+ from langchain_core.prompts import MessagesPlaceholder
  
- from langchain_core.prompts import MessagesPlaceholder
+ prompt = MessagesPlaceholder("history")
+ prompt.format_messages() # raises KeyError
  
- prompt = MessagesPlaceholder("history")
- prompt.format_messages() # raises KeyError
+ prompt = MessagesPlaceholder("history", optional=True)
+ prompt.format_messages() # returns empty list []
  
- prompt = MessagesPlaceholder("history", optional=True)
- prompt.format_messages() # returns empty list []
-
- prompt.format_messages(
- history=[
- ("system", "You are an AI assistant."),
- ("human", "Hello!"),
- ]
- )
- # -> [
- # SystemMessage(content="You are an AI assistant."),
- # HumanMessage(content="Hello!"),
- # ]
+ prompt.format_messages(
+ history=[
+ ("system", "You are an AI assistant."),
+ ("human", "Hello!"),
+ ]
+ )
+ # -> [
+ # SystemMessage(content="You are an AI assistant."),
+ # HumanMessage(content="Hello!"),
+ # ]
+ ```
  
  Building a prompt with chat history:
  
- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
  
- prompt = ChatPromptTemplate.from_messages(
- [
- ("system", "You are a helpful assistant."),
- MessagesPlaceholder("history"),
- ("human", "{question}"),
- ]
- )
- prompt.invoke(
- {
- "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
- "question": "now multiply that by 4",
- }
- )
- # -> ChatPromptValue(messages=[
- # SystemMessage(content="You are a helpful assistant."),
- # HumanMessage(content="what's 5 + 2"),
- # AIMessage(content="5 + 2 is 7"),
- # HumanMessage(content="now multiply that by 4"),
- # ])
+ prompt = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are a helpful assistant."),
+ MessagesPlaceholder("history"),
+ ("human", "{question}"),
+ ]
+ )
+ prompt.invoke(
+ {
+ "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
+ "question": "now multiply that by 4",
+ }
+ )
+ # -> ChatPromptValue(messages=[
+ # SystemMessage(content="You are a helpful assistant."),
+ # HumanMessage(content="what's 5 + 2"),
+ # AIMessage(content="5 + 2 is 7"),
+ # HumanMessage(content="now multiply that by 4"),
+ # ])
+ ```
  
  Limiting the number of messages:
  
- .. code-block:: python
+ ```python
+ from langchain_core.prompts import MessagesPlaceholder
  
- from langchain_core.prompts import MessagesPlaceholder
-
- prompt = MessagesPlaceholder("history", n_messages=1)
-
- prompt.format_messages(
- history=[
- ("system", "You are an AI assistant."),
- ("human", "Hello!"),
- ]
- )
- # -> [
- # HumanMessage(content="Hello!"),
- # ]
+ prompt = MessagesPlaceholder("history", n_messages=1)
  
+ prompt.format_messages(
+ history=[
+ ("system", "You are an AI assistant."),
+ ("human", "Hello!"),
+ ]
+ )
+ # -> [
+ # HumanMessage(content="Hello!"),
+ # ]
+ ```
  """
  
  variable_name: str
  """Name of variable to use as messages."""
  
  optional: bool = False
- """If True format_messages can be called with no arguments and will return an empty
- list. If False then a named argument with name `variable_name` must be passed
- in, even if the value is an empty list."""
+ """If `True` format_messages can be called with no arguments and will return an
+ empty list. If `False` then a named argument with name `variable_name` must be
+ passed in, even if the value is an empty list."""
  
  n_messages: PositiveInt | None = None
- """Maximum number of messages to include. If None, then will include all.
- Defaults to None."""
+ """Maximum number of messages to include. If `None`, then will include all.
+ """
  
  def __init__(
  self, variable_name: str, *, optional: bool = False, **kwargs: Any
@@ -145,10 +144,9 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
  
  Args:
  variable_name: Name of variable to use as messages.
- optional: If True format_messages can be called with no arguments and will
- return an empty list. If False then a named argument with name
+ optional: If `True` format_messages can be called with no arguments and will
+ return an empty list. If `False` then a named argument with name
  `variable_name` must be passed in, even if the value is an empty list.
- Defaults to False.]
  """
  # mypy can't detect the init which is defined in the parent class
  # b/c these are BaseModel classes.
@@ -196,7 +194,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
  """Human-readable representation.
  
  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML.
  
  Returns:
  Human-readable representation.
@@ -236,13 +234,13 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
  
  Args:
  template: a template.
- template_format: format of the template. Defaults to "f-string".
+ template_format: format of the template.
  partial_variables: A dictionary of variables that can be used to partially
- fill in the template. For example, if the template is
- `"{variable1} {variable2}"`, and `partial_variables` is
- `{"variable1": "foo"}`, then the final prompt will be
- `"foo {variable2}"`.
- Defaults to None.
+ fill in the template. For example, if the template is
+ `"{variable1} {variable2}"`, and `partial_variables` is
+ `{"variable1": "foo"}`, then the final prompt will be
+ `"foo {variable2}"`.
+
  **kwargs: keyword arguments to pass to the constructor.
  
  Returns:
@@ -331,7 +329,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
  """Human-readable representation.
  
  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML.
  
  Returns:
  Human-readable representation.
@@ -413,9 +411,9 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
  Args:
  template: a template.
  template_format: format of the template.
- Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
+ Options are: 'f-string', 'mustache', 'jinja2'.
  partial_variables: A dictionary of variables that can be used too partially.
- Defaults to None.
+
  **kwargs: keyword arguments to pass to the constructor.
  
  Returns:
@@ -638,7 +636,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
  """Human-readable representation.
  
  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML.
  
  Returns:
  Human-readable representation.
@@ -685,7 +683,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
  
  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.
  
  Returns:
  formatted string.
@@ -697,7 +695,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
  
  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.
  
  Returns:
  formatted string.
@@ -751,7 +749,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
  """Human-readable representation.
  
  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML.
  
  Returns:
  Human-readable representation.
@@ -781,78 +779,78 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  Examples:
  !!! warning "Behavior changed in 0.2.24"
  You can pass any Message-like formats supported by
- ``ChatPromptTemplate.from_messages()`` directly to ``ChatPromptTemplate()``
+ `ChatPromptTemplate.from_messages()` directly to `ChatPromptTemplate()`
  init.
  
- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate
  
- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot. Your name is {name}."),
- ("human", "Hello, how are you doing?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "{user_input}"),
- ]
- )
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot. Your name is {name}."),
+ ("human", "Hello, how are you doing?"),
+ ("ai", "I'm doing well, thanks!"),
+ ("human", "{user_input}"),
+ ]
+ )
  
- prompt_value = template.invoke(
- {
- "name": "Bob",
- "user_input": "What is your name?",
- }
- )
- # Output:
- # ChatPromptValue(
- #    messages=[
- #        SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
- #        HumanMessage(content='Hello, how are you doing?'),
- #        AIMessage(content="I'm doing well, thanks!"),
- #        HumanMessage(content='What is your name?')
- #    ]
- # )
+ prompt_value = template.invoke(
+ {
+ "name": "Bob",
+ "user_input": "What is your name?",
+ }
+ )
+ # Output:
+ # ChatPromptValue(
+ #    messages=[
+ #        SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
+ #        HumanMessage(content='Hello, how are you doing?'),
+ #        AIMessage(content="I'm doing well, thanks!"),
+ #        HumanMessage(content='What is your name?')
+ #    ]
+ # )
+ ```
  
  Messages Placeholder:
  
- .. code-block:: python
-
- # In addition to Human/AI/Tool/Function messages,
- # you can initialize the template with a MessagesPlaceholder
- # either using the class directly or with the shorthand tuple syntax:
+ ```python
+ # In addition to Human/AI/Tool/Function messages,
+ # you can initialize the template with a MessagesPlaceholder
+ # either using the class directly or with the shorthand tuple syntax:
+
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot."),
+ # Means the template will receive an optional list of messages under
+ # the "conversation" key
+ ("placeholder", "{conversation}"),
+ # Equivalently:
+ # MessagesPlaceholder(variable_name="conversation", optional=True)
+ ]
+ )
  
- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot."),
- # Means the template will receive an optional list of messages under
- # the "conversation" key
- ("placeholder", "{conversation}"),
- # Equivalently:
- # MessagesPlaceholder(variable_name="conversation", optional=True)
+ prompt_value = template.invoke(
+ {
+ "conversation": [
+ ("human", "Hi!"),
+ ("ai", "How can I assist you today?"),
+ ("human", "Can you make me an ice cream sundae?"),
+ ("ai", "No."),
  ]
- )
-
- prompt_value = template.invoke(
- {
- "conversation": [
- ("human", "Hi!"),
- ("ai", "How can I assist you today?"),
- ("human", "Can you make me an ice cream sundae?"),
- ("ai", "No."),
- ]
- }
- )
+ }
+ )
  
- # Output:
- # ChatPromptValue(
- # messages=[
- # SystemMessage(content='You are a helpful AI bot.'),
- # HumanMessage(content='Hi!'),
- # AIMessage(content='How can I assist you today?'),
- # HumanMessage(content='Can you make me an ice cream sundae?'),
- # AIMessage(content='No.'),
- # ]
- # )
+ # Output:
+ # ChatPromptValue(
+ # messages=[
+ # SystemMessage(content='You are a helpful AI bot.'),
+ # HumanMessage(content='Hi!'),
+ # AIMessage(content='How can I assist you today?'),
+ # HumanMessage(content='Can you make me an ice cream sundae?'),
+ # AIMessage(content='No.'),
+ # ]
+ # )
+ ```
  
  Single-variable template:
  
@@ -861,29 +859,28 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  inject the provided argument into that variable location.
  
  
- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate
  
- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot. Your name is Carl."),
- ("human", "{user_input}"),
- ]
- )
-
- prompt_value = template.invoke("Hello, there!")
- # Equivalent to
- # prompt_value = template.invoke({"user_input": "Hello, there!"})
-
- # Output:
- # ChatPromptValue(
- #    messages=[
- #        SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
- #        HumanMessage(content='Hello, there!'),
- #    ]
- # )
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot. Your name is Carl."),
+ ("human", "{user_input}"),
+ ]
+ )
  
+ prompt_value = template.invoke("Hello, there!")
+ # Equivalent to
+ # prompt_value = template.invoke({"user_input": "Hello, there!"})
+
+ # Output:
+ # ChatPromptValue(
+ #    messages=[
+ #        SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
+ #        HumanMessage(content='Hello, there!'),
+ #    ]
+ # )
+ ```
  """ # noqa: E501
  
  messages: Annotated[list[MessageLike], SkipValidation()]
@@ -902,12 +899,12 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  Args:
  messages: sequence of message representations.
- A message can be represented using the following formats:
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
- (message type, template); e.g., ("human", "{user_input}"),
- (4) 2-tuple of (message class, template), (5) a string which is
- shorthand for ("human", template); e.g., "{user_input}".
- template_format: format of the template. Defaults to "f-string".
+ A message can be represented using the following formats:
+ (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+ (message type, template); e.g., ("human", "{user_input}"),
+ (4) 2-tuple of (message class, template), (5) a string which is
+ shorthand for ("human", template); e.g., "{user_input}".
+ template_format: format of the template.
  input_variables: A list of the names of the variables whose values are
  required as inputs to the prompt.
  optional_variables: A list of the names of the variables for placeholder
@@ -924,27 +921,26 @@
  Examples:
  Instantiation from a list of message templates:
  
- .. code-block:: python
-
- template = ChatPromptTemplate(
- [
- ("human", "Hello, how are you?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "That's good to hear."),
- ]
- )
+ ```python
+ template = ChatPromptTemplate(
+ [
+ ("human", "Hello, how are you?"),
+ ("ai", "I'm doing well, thanks!"),
+ ("human", "That's good to hear."),
+ ]
+ )
+ ```
  
  Instantiation from mixed message formats:
  
- .. code-block:: python
-
- template = ChatPromptTemplate(
- [
- SystemMessage(content="hello"),
- ("human", "Hello, how are you?"),
- ]
- )
-
+ ```python
+ template = ChatPromptTemplate(
+ [
+ SystemMessage(content="hello"),
+ ("human", "Hello, how are you?"),
+ ]
+ )
+ ```
  """
  messages_ = [
  _convert_to_message_template(message, template_format)
@@ -974,10 +970,10 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  @classmethod
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.
  
  Returns:
- ``["langchain", "prompts", "chat"]``
+ `["langchain", "prompts", "chat"]`
  """
  return ["langchain", "prompts", "chat"]
  
@@ -1104,35 +1100,34 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  Examples:
  Instantiation from a list of message templates:
  
- .. code-block:: python
-
- template = ChatPromptTemplate.from_messages(
- [
- ("human", "Hello, how are you?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "That's good to hear."),
- ]
- )
+ ```python
+ template = ChatPromptTemplate.from_messages(
+ [
+ ("human", "Hello, how are you?"),
+ ("ai", "I'm doing well, thanks!"),
+ ("human", "That's good to hear."),
+ ]
+ )
+ ```
  
  Instantiation from mixed message formats:
  
- .. code-block:: python
-
- template = ChatPromptTemplate.from_messages(
- [
- SystemMessage(content="hello"),
- ("human", "Hello, how are you?"),
- ]
- )
-
+ ```python
+ template = ChatPromptTemplate.from_messages(
+ [
+ SystemMessage(content="hello"),
+ ("human", "Hello, how are you?"),
+ ]
+ )
+ ```
  Args:
  messages: sequence of message representations.
- A message can be represented using the following formats:
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
- (message type, template); e.g., ("human", "{user_input}"),
- (4) 2-tuple of (message class, template), (5) a string which is
- shorthand for ("human", template); e.g., "{user_input}".
- template_format: format of the template. Defaults to "f-string".
+ A message can be represented using the following formats:
+ (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+ (message type, template); e.g., ("human", "{user_input}"),
+ (4) 2-tuple of (message class, template), (5) a string which is
+ shorthand for ("human", template); e.g., "{user_input}".
+ template_format: format of the template.
  
  Returns:
  a chat prompt template.
@@ -1145,7 +1140,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.
  
  Raises:
  ValueError: if messages are of unexpected types.
@@ -1173,7 +1168,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.
  
  Returns:
  list of formatted messages.
@@ -1208,23 +1203,21 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  
  Example:
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate
  
- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate
-
- template = ChatPromptTemplate.from_messages(
- [
- ("system", "You are an AI assistant named {name}."),
- ("human", "Hi I'm {user}"),
- ("ai", "Hi there, {user}, I'm {name}."),
- ("human", "{input}"),
- ]
- )
- template2 = template.partial(user="Lucy", name="R2D2")
-
- template2.format_messages(input="hello")
+ template = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are an AI assistant named {name}."),
+ ("human", "Hi I'm {user}"),
+ ("ai", "Hi there, {user}, I'm {name}."),
+ ("human", "{input}"),
+ ]
+ )
+ template2 = template.partial(user="Lucy", name="R2D2")
  
+ template2.format_messages(input="hello")
+ ```
  """
  prompt_dict = self.__dict__.copy()
  prompt_dict["input_variables"] = list(
@@ -1262,7 +1255,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  
  Returns:
  If index is an int, returns the message at that index.
- If index is a slice, returns a new ``ChatPromptTemplate``
+ If index is a slice, returns a new `ChatPromptTemplate`
  containing the messages in that slice.
  """
  if isinstance(index, slice):
@@ -1293,7 +1286,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  """Human-readable representation.
  
  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML.
  
  Returns:
  Human-readable representation.
@@ -1312,7 +1305,7 @@ def _create_template_from_message_type(
  Args:
  message_type: str the type of the message template (e.g., "human", "ai", etc.)
  template: str the template string.
- template_format: format of the template. Defaults to "f-string".
+ template_format: format of the template.
  
  Returns:
  a message prompt template of the appropriate type.
@@ -1389,7 +1382,7 @@ def _convert_to_message_template(
  
  Args:
  message: a representation of a message in one of the supported formats.
- template_format: format of the template. Defaults to "f-string".
+ template_format: format of the template.
  
  Returns:
  an instance of a message or a message template.
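
For quick reference, here is a minimal sketch (assuming langchain-core 1.0 is installed) of the `MessagesPlaceholder` and `ChatPromptTemplate` behavior that the updated docstrings above describe; it reuses the documented examples rather than introducing any new API surface.

```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# optional=True lets format_messages() run without the "history" variable;
# without it, omitting the variable raises KeyError (per the docstring above).
placeholder = MessagesPlaceholder("history", optional=True)
assert placeholder.format_messages() == []

# A placeholder inside a chat template expands the supplied message list in place.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder("history"),
        ("human", "{question}"),
    ]
)
prompt_value = prompt.invoke(
    {
        "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
        "question": "now multiply that by 4",
    }
)
# -> SystemMessage, HumanMessage, AIMessage, HumanMessage, as documented above.
print(prompt_value.messages)
```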