langchain-core 1.0.0a7__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

This version of langchain-core has been flagged as potentially problematic.

Files changed (135)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +7 -6
  7. langchain_core/caches.py +4 -10
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +76 -55
  16. langchain_core/document_loaders/langsmith.py +21 -21
  17. langchain_core/documents/__init__.py +0 -1
  18. langchain_core/documents/base.py +37 -40
  19. langchain_core/documents/transformers.py +28 -29
  20. langchain_core/embeddings/fake.py +46 -52
  21. langchain_core/exceptions.py +5 -5
  22. langchain_core/indexing/api.py +11 -11
  23. langchain_core/indexing/base.py +24 -24
  24. langchain_core/language_models/__init__.py +0 -2
  25. langchain_core/language_models/_utils.py +51 -53
  26. langchain_core/language_models/base.py +23 -24
  27. langchain_core/language_models/chat_models.py +121 -144
  28. langchain_core/language_models/fake_chat_models.py +5 -5
  29. langchain_core/language_models/llms.py +10 -12
  30. langchain_core/load/dump.py +1 -1
  31. langchain_core/load/load.py +16 -16
  32. langchain_core/load/serializable.py +35 -34
  33. langchain_core/messages/__init__.py +1 -16
  34. langchain_core/messages/ai.py +105 -104
  35. langchain_core/messages/base.py +26 -26
  36. langchain_core/messages/block_translators/__init__.py +17 -17
  37. langchain_core/messages/block_translators/anthropic.py +2 -2
  38. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  39. langchain_core/messages/block_translators/google_genai.py +2 -2
  40. langchain_core/messages/block_translators/groq.py +117 -21
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +4 -4
  43. langchain_core/messages/chat.py +1 -1
  44. langchain_core/messages/content.py +189 -193
  45. langchain_core/messages/function.py +5 -5
  46. langchain_core/messages/human.py +15 -17
  47. langchain_core/messages/modifier.py +1 -1
  48. langchain_core/messages/system.py +12 -14
  49. langchain_core/messages/tool.py +45 -49
  50. langchain_core/messages/utils.py +384 -396
  51. langchain_core/output_parsers/__init__.py +1 -14
  52. langchain_core/output_parsers/base.py +22 -23
  53. langchain_core/output_parsers/json.py +3 -3
  54. langchain_core/output_parsers/list.py +1 -1
  55. langchain_core/output_parsers/openai_functions.py +46 -44
  56. langchain_core/output_parsers/openai_tools.py +7 -7
  57. langchain_core/output_parsers/pydantic.py +10 -11
  58. langchain_core/output_parsers/string.py +1 -1
  59. langchain_core/output_parsers/transform.py +2 -2
  60. langchain_core/output_parsers/xml.py +1 -1
  61. langchain_core/outputs/__init__.py +1 -1
  62. langchain_core/outputs/chat_generation.py +14 -14
  63. langchain_core/outputs/generation.py +5 -5
  64. langchain_core/outputs/llm_result.py +5 -5
  65. langchain_core/prompt_values.py +5 -5
  66. langchain_core/prompts/__init__.py +3 -23
  67. langchain_core/prompts/base.py +32 -37
  68. langchain_core/prompts/chat.py +216 -222
  69. langchain_core/prompts/dict.py +2 -2
  70. langchain_core/prompts/few_shot.py +76 -83
  71. langchain_core/prompts/few_shot_with_templates.py +6 -8
  72. langchain_core/prompts/image.py +11 -13
  73. langchain_core/prompts/loading.py +1 -1
  74. langchain_core/prompts/message.py +2 -2
  75. langchain_core/prompts/prompt.py +14 -16
  76. langchain_core/prompts/string.py +19 -7
  77. langchain_core/prompts/structured.py +24 -25
  78. langchain_core/rate_limiters.py +36 -38
  79. langchain_core/retrievers.py +41 -182
  80. langchain_core/runnables/base.py +565 -590
  81. langchain_core/runnables/branch.py +7 -7
  82. langchain_core/runnables/config.py +37 -44
  83. langchain_core/runnables/configurable.py +8 -9
  84. langchain_core/runnables/fallbacks.py +8 -8
  85. langchain_core/runnables/graph.py +28 -27
  86. langchain_core/runnables/graph_ascii.py +19 -18
  87. langchain_core/runnables/graph_mermaid.py +20 -31
  88. langchain_core/runnables/graph_png.py +7 -7
  89. langchain_core/runnables/history.py +20 -20
  90. langchain_core/runnables/passthrough.py +8 -8
  91. langchain_core/runnables/retry.py +3 -3
  92. langchain_core/runnables/router.py +1 -1
  93. langchain_core/runnables/schema.py +33 -33
  94. langchain_core/runnables/utils.py +30 -34
  95. langchain_core/stores.py +72 -102
  96. langchain_core/sys_info.py +27 -29
  97. langchain_core/tools/__init__.py +1 -14
  98. langchain_core/tools/base.py +63 -63
  99. langchain_core/tools/convert.py +92 -92
  100. langchain_core/tools/render.py +9 -9
  101. langchain_core/tools/retriever.py +1 -1
  102. langchain_core/tools/simple.py +6 -7
  103. langchain_core/tools/structured.py +17 -18
  104. langchain_core/tracers/__init__.py +1 -9
  105. langchain_core/tracers/base.py +35 -35
  106. langchain_core/tracers/context.py +12 -17
  107. langchain_core/tracers/event_stream.py +3 -3
  108. langchain_core/tracers/langchain.py +8 -8
  109. langchain_core/tracers/log_stream.py +17 -18
  110. langchain_core/tracers/memory_stream.py +2 -2
  111. langchain_core/tracers/schemas.py +0 -129
  112. langchain_core/utils/aiter.py +31 -31
  113. langchain_core/utils/env.py +5 -5
  114. langchain_core/utils/function_calling.py +48 -120
  115. langchain_core/utils/html.py +4 -4
  116. langchain_core/utils/input.py +2 -2
  117. langchain_core/utils/interactive_env.py +1 -1
  118. langchain_core/utils/iter.py +19 -19
  119. langchain_core/utils/json.py +1 -1
  120. langchain_core/utils/json_schema.py +2 -2
  121. langchain_core/utils/mustache.py +5 -5
  122. langchain_core/utils/pydantic.py +17 -17
  123. langchain_core/utils/strings.py +4 -4
  124. langchain_core/utils/utils.py +25 -28
  125. langchain_core/vectorstores/base.py +43 -64
  126. langchain_core/vectorstores/in_memory.py +83 -85
  127. langchain_core/version.py +1 -1
  128. {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
  129. langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
  130. langchain_core/memory.py +0 -120
  131. langchain_core/pydantic_v1/__init__.py +0 -30
  132. langchain_core/pydantic_v1/dataclasses.py +0 -23
  133. langchain_core/pydantic_v1/main.py +0 -23
  134. langchain_core-1.0.0a7.dist-info/RECORD +0 -176
  135. {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
langchain_core/prompts/chat.py

@@ -59,84 +59,83 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

  Direct usage:

- .. code-block:: python
+ ```python
+ from langchain_core.prompts import MessagesPlaceholder

- from langchain_core.prompts import MessagesPlaceholder
+ prompt = MessagesPlaceholder("history")
+ prompt.format_messages() # raises KeyError

- prompt = MessagesPlaceholder("history")
- prompt.format_messages() # raises KeyError
+ prompt = MessagesPlaceholder("history", optional=True)
+ prompt.format_messages() # returns empty list []

- prompt = MessagesPlaceholder("history", optional=True)
- prompt.format_messages() # returns empty list []
-
- prompt.format_messages(
- history=[
- ("system", "You are an AI assistant."),
- ("human", "Hello!"),
- ]
- )
- # -> [
- # SystemMessage(content="You are an AI assistant."),
- # HumanMessage(content="Hello!"),
- # ]
+ prompt.format_messages(
+ history=[
+ ("system", "You are an AI assistant."),
+ ("human", "Hello!"),
+ ]
+ )
+ # -> [
+ # SystemMessage(content="You are an AI assistant."),
+ # HumanMessage(content="Hello!"),
+ # ]
+ ```

  Building a prompt with chat history:

- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

- prompt = ChatPromptTemplate.from_messages(
- [
- ("system", "You are a helpful assistant."),
- MessagesPlaceholder("history"),
- ("human", "{question}"),
- ]
- )
- prompt.invoke(
- {
- "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
- "question": "now multiply that by 4",
- }
- )
- # -> ChatPromptValue(messages=[
- # SystemMessage(content="You are a helpful assistant."),
- # HumanMessage(content="what's 5 + 2"),
- # AIMessage(content="5 + 2 is 7"),
- # HumanMessage(content="now multiply that by 4"),
- # ])
+ prompt = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are a helpful assistant."),
+ MessagesPlaceholder("history"),
+ ("human", "{question}"),
+ ]
+ )
+ prompt.invoke(
+ {
+ "history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
+ "question": "now multiply that by 4",
+ }
+ )
+ # -> ChatPromptValue(messages=[
+ # SystemMessage(content="You are a helpful assistant."),
+ # HumanMessage(content="what's 5 + 2"),
+ # AIMessage(content="5 + 2 is 7"),
+ # HumanMessage(content="now multiply that by 4"),
+ # ])
+ ```

  Limiting the number of messages:

- .. code-block:: python
-
- from langchain_core.prompts import MessagesPlaceholder
-
- prompt = MessagesPlaceholder("history", n_messages=1)
+ ```python
+ from langchain_core.prompts import MessagesPlaceholder

- prompt.format_messages(
- history=[
- ("system", "You are an AI assistant."),
- ("human", "Hello!"),
- ]
- )
- # -> [
- # HumanMessage(content="Hello!"),
- # ]
+ prompt = MessagesPlaceholder("history", n_messages=1)

+ prompt.format_messages(
+ history=[
+ ("system", "You are an AI assistant."),
+ ("human", "Hello!"),
+ ]
+ )
+ # -> [
+ # HumanMessage(content="Hello!"),
+ # ]
+ ```
  """

  variable_name: str
  """Name of variable to use as messages."""

  optional: bool = False
- """If True format_messages can be called with no arguments and will return an empty
- list. If False then a named argument with name `variable_name` must be passed
- in, even if the value is an empty list."""
+ """If `True` format_messages can be called with no arguments and will return an
+ empty list. If `False` then a named argument with name `variable_name` must be
+ passed in, even if the value is an empty list."""

  n_messages: PositiveInt | None = None
- """Maximum number of messages to include. If None, then will include all.
- Defaults to None."""
+ """Maximum number of messages to include. If `None`, then will include all.
+ """

  def __init__(
  self, variable_name: str, *, optional: bool = False, **kwargs: Any
@@ -145,10 +144,10 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):

  Args:
  variable_name: Name of variable to use as messages.
- optional: If True format_messages can be called with no arguments and will
- return an empty list. If False then a named argument with name
+ optional: If `True` format_messages can be called with no arguments and will
+ return an empty list. If `False` then a named argument with name
  `variable_name` must be passed in, even if the value is an empty list.
- Defaults to False.]
+ Defaults to `False`.]
  """
  # mypy can't detect the init which is defined in the parent class
  # b/c these are BaseModel classes.
@@ -196,7 +195,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.
@@ -238,11 +237,11 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
  template: a template.
  template_format: format of the template. Defaults to "f-string".
  partial_variables: A dictionary of variables that can be used to partially
- fill in the template. For example, if the template is
- `"{variable1} {variable2}"`, and `partial_variables` is
- `{"variable1": "foo"}`, then the final prompt will be
- `"foo {variable2}"`.
- Defaults to None.
+ fill in the template. For example, if the template is
+ `"{variable1} {variable2}"`, and `partial_variables` is
+ `{"variable1": "foo"}`, then the final prompt will be
+ `"foo {variable2}"`.
+
  **kwargs: keyword arguments to pass to the constructor.

  Returns:
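
The `partial_variables` behavior documented in the hunk above can be sketched as follows (illustrative only, not part of the diff; `HumanMessagePromptTemplate` is assumed here as a concrete subclass):

```python
from langchain_core.prompts import HumanMessagePromptTemplate

# Pre-fill one of the two template variables up front.
msg_template = HumanMessagePromptTemplate.from_template(
    "{variable1} {variable2}",
    partial_variables={"variable1": "foo"},
)

# Only the remaining variable needs to be supplied at format time;
# per the docstring, the resulting message content should be "foo bar".
message = msg_template.format(variable2="bar")
```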
@@ -331,7 +330,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.
@@ -415,7 +414,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
  template_format: format of the template.
  Options are: 'f-string', 'mustache', 'jinja2'. Defaults to "f-string".
  partial_variables: A dictionary of variables that can be used too partially.
- Defaults to None.
+
  **kwargs: keyword arguments to pass to the constructor.

  Returns:
@@ -638,7 +637,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.
@@ -685,7 +684,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.

  Returns:
  formatted string.
@@ -697,7 +696,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):

  Args:
  **kwargs: keyword arguments to use for filling in template variables
- in all the template messages in this chat template.
+ in all the template messages in this chat template.

  Returns:
  formatted string.
@@ -751,7 +750,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.
@@ -781,78 +780,78 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  Examples:
  !!! warning "Behavior changed in 0.2.24"
  You can pass any Message-like formats supported by
- ``ChatPromptTemplate.from_messages()`` directly to ``ChatPromptTemplate()``
+ `ChatPromptTemplate.from_messages()` directly to `ChatPromptTemplate()`
  init.

- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate

- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot. Your name is {name}."),
- ("human", "Hello, how are you doing?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "{user_input}"),
- ]
- )
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot. Your name is {name}."),
+ ("human", "Hello, how are you doing?"),
+ ("ai", "I'm doing well, thanks!"),
+ ("human", "{user_input}"),
+ ]
+ )

- prompt_value = template.invoke(
- {
- "name": "Bob",
- "user_input": "What is your name?",
- }
- )
- # Output:
- # ChatPromptValue(
- # messages=[
- # SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
- # HumanMessage(content='Hello, how are you doing?'),
- # AIMessage(content="I'm doing well, thanks!"),
- # HumanMessage(content='What is your name?')
- # ]
- # )
+ prompt_value = template.invoke(
+ {
+ "name": "Bob",
+ "user_input": "What is your name?",
+ }
+ )
+ # Output:
+ # ChatPromptValue(
+ # messages=[
+ # SystemMessage(content='You are a helpful AI bot. Your name is Bob.'),
+ # HumanMessage(content='Hello, how are you doing?'),
+ # AIMessage(content="I'm doing well, thanks!"),
+ # HumanMessage(content='What is your name?')
+ # ]
+ # )
+ ```

  Messages Placeholder:

- .. code-block:: python
-
- # In addition to Human/AI/Tool/Function messages,
- # you can initialize the template with a MessagesPlaceholder
- # either using the class directly or with the shorthand tuple syntax:
+ ```python
+ # In addition to Human/AI/Tool/Function messages,
+ # you can initialize the template with a MessagesPlaceholder
+ # either using the class directly or with the shorthand tuple syntax:
+
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot."),
+ # Means the template will receive an optional list of messages under
+ # the "conversation" key
+ ("placeholder", "{conversation}"),
+ # Equivalently:
+ # MessagesPlaceholder(variable_name="conversation", optional=True)
+ ]
+ )

- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot."),
- # Means the template will receive an optional list of messages under
- # the "conversation" key
- ("placeholder", "{conversation}"),
- # Equivalently:
- # MessagesPlaceholder(variable_name="conversation", optional=True)
+ prompt_value = template.invoke(
+ {
+ "conversation": [
+ ("human", "Hi!"),
+ ("ai", "How can I assist you today?"),
+ ("human", "Can you make me an ice cream sundae?"),
+ ("ai", "No."),
  ]
- )
-
- prompt_value = template.invoke(
- {
- "conversation": [
- ("human", "Hi!"),
- ("ai", "How can I assist you today?"),
- ("human", "Can you make me an ice cream sundae?"),
- ("ai", "No."),
- ]
- }
- )
+ }
+ )

- # Output:
- # ChatPromptValue(
- # messages=[
- # SystemMessage(content='You are a helpful AI bot.'),
- # HumanMessage(content='Hi!'),
- # AIMessage(content='How can I assist you today?'),
- # HumanMessage(content='Can you make me an ice cream sundae?'),
- # AIMessage(content='No.'),
- # ]
- # )
+ # Output:
+ # ChatPromptValue(
+ # messages=[
+ # SystemMessage(content='You are a helpful AI bot.'),
+ # HumanMessage(content='Hi!'),
+ # AIMessage(content='How can I assist you today?'),
+ # HumanMessage(content='Can you make me an ice cream sundae?'),
+ # AIMessage(content='No.'),
+ # ]
+ # )
+ ```

  Single-variable template:

@@ -861,29 +860,28 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  inject the provided argument into that variable location.


- .. code-block:: python
-
- from langchain_core.prompts import ChatPromptTemplate
-
- template = ChatPromptTemplate(
- [
- ("system", "You are a helpful AI bot. Your name is Carl."),
- ("human", "{user_input}"),
- ]
- )
-
- prompt_value = template.invoke("Hello, there!")
- # Equivalent to
- # prompt_value = template.invoke({"user_input": "Hello, there!"})
+ ```python
+ from langchain_core.prompts import ChatPromptTemplate

- # Output:
- # ChatPromptValue(
- # messages=[
- # SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
- # HumanMessage(content='Hello, there!'),
- # ]
- # )
+ template = ChatPromptTemplate(
+ [
+ ("system", "You are a helpful AI bot. Your name is Carl."),
+ ("human", "{user_input}"),
+ ]
+ )

+ prompt_value = template.invoke("Hello, there!")
+ # Equivalent to
+ # prompt_value = template.invoke({"user_input": "Hello, there!"})
+
+ # Output:
+ # ChatPromptValue(
+ # messages=[
+ # SystemMessage(content='You are a helpful AI bot. Your name is Carl.'),
+ # HumanMessage(content='Hello, there!'),
+ # ]
+ # )
+ ```
  """ # noqa: E501

  messages: Annotated[list[MessageLike], SkipValidation()]
@@ -902,11 +900,11 @@ class ChatPromptTemplate(BaseChatPromptTemplate):

  Args:
  messages: sequence of message representations.
- A message can be represented using the following formats:
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
- (message type, template); e.g., ("human", "{user_input}"),
- (4) 2-tuple of (message class, template), (5) a string which is
- shorthand for ("human", template); e.g., "{user_input}".
+ A message can be represented using the following formats:
+ (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+ (message type, template); e.g., ("human", "{user_input}"),
+ (4) 2-tuple of (message class, template), (5) a string which is
+ shorthand for ("human", template); e.g., "{user_input}".
  template_format: format of the template. Defaults to "f-string".
  input_variables: A list of the names of the variables whose values are
  required as inputs to the prompt.
924
922
  Examples:
925
923
  Instantiation from a list of message templates:
926
924
 
927
- .. code-block:: python
928
-
929
- template = ChatPromptTemplate(
930
- [
931
- ("human", "Hello, how are you?"),
932
- ("ai", "I'm doing well, thanks!"),
933
- ("human", "That's good to hear."),
934
- ]
935
- )
925
+ ```python
926
+ template = ChatPromptTemplate(
927
+ [
928
+ ("human", "Hello, how are you?"),
929
+ ("ai", "I'm doing well, thanks!"),
930
+ ("human", "That's good to hear."),
931
+ ]
932
+ )
933
+ ```
936
934
 
937
935
  Instantiation from mixed message formats:
938
936
 
939
- .. code-block:: python
940
-
941
- template = ChatPromptTemplate(
942
- [
943
- SystemMessage(content="hello"),
944
- ("human", "Hello, how are you?"),
945
- ]
946
- )
947
-
937
+ ```python
938
+ template = ChatPromptTemplate(
939
+ [
940
+ SystemMessage(content="hello"),
941
+ ("human", "Hello, how are you?"),
942
+ ]
943
+ )
944
+ ```
948
945
  """
949
946
  messages_ = [
950
947
  _convert_to_message_template(message, template_format)
@@ -977,7 +974,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
977
974
  """Get the namespace of the langchain object.
978
975
 
979
976
  Returns:
980
- ``["langchain", "prompts", "chat"]``
977
+ `["langchain", "prompts", "chat"]`
981
978
  """
982
979
  return ["langchain", "prompts", "chat"]
983
980
 
@@ -1104,34 +1101,33 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1104
1101
  Examples:
1105
1102
  Instantiation from a list of message templates:
1106
1103
 
1107
- .. code-block:: python
1108
-
1109
- template = ChatPromptTemplate.from_messages(
1110
- [
1111
- ("human", "Hello, how are you?"),
1112
- ("ai", "I'm doing well, thanks!"),
1113
- ("human", "That's good to hear."),
1114
- ]
1115
- )
1104
+ ```python
1105
+ template = ChatPromptTemplate.from_messages(
1106
+ [
1107
+ ("human", "Hello, how are you?"),
1108
+ ("ai", "I'm doing well, thanks!"),
1109
+ ("human", "That's good to hear."),
1110
+ ]
1111
+ )
1112
+ ```
1116
1113
 
1117
1114
  Instantiation from mixed message formats:
1118
1115
 
1119
- .. code-block:: python
1120
-
1121
- template = ChatPromptTemplate.from_messages(
1122
- [
1123
- SystemMessage(content="hello"),
1124
- ("human", "Hello, how are you?"),
1125
- ]
1126
- )
1127
-
1116
+ ```python
1117
+ template = ChatPromptTemplate.from_messages(
1118
+ [
1119
+ SystemMessage(content="hello"),
1120
+ ("human", "Hello, how are you?"),
1121
+ ]
1122
+ )
1123
+ ```
1128
1124
  Args:
1129
1125
  messages: sequence of message representations.
1130
- A message can be represented using the following formats:
1131
- (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
1132
- (message type, template); e.g., ("human", "{user_input}"),
1133
- (4) 2-tuple of (message class, template), (5) a string which is
1134
- shorthand for ("human", template); e.g., "{user_input}".
1126
+ A message can be represented using the following formats:
1127
+ (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
1128
+ (message type, template); e.g., ("human", "{user_input}"),
1129
+ (4) 2-tuple of (message class, template), (5) a string which is
1130
+ shorthand for ("human", template); e.g., "{user_input}".
1135
1131
  template_format: format of the template. Defaults to "f-string".
1136
1132
 
1137
1133
  Returns:
@@ -1145,7 +1141,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1145
1141
 
1146
1142
  Args:
1147
1143
  **kwargs: keyword arguments to use for filling in template variables
1148
- in all the template messages in this chat template.
1144
+ in all the template messages in this chat template.
1149
1145
 
1150
1146
  Raises:
1151
1147
  ValueError: if messages are of unexpected types.
@@ -1173,7 +1169,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1173
1169
 
1174
1170
  Args:
1175
1171
  **kwargs: keyword arguments to use for filling in template variables
1176
- in all the template messages in this chat template.
1172
+ in all the template messages in this chat template.
1177
1173
 
1178
1174
  Returns:
1179
1175
  list of formatted messages.
@@ -1208,23 +1204,21 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1208
1204
 
1209
1205
 
1210
1206
  Example:
1207
+ ```python
1208
+ from langchain_core.prompts import ChatPromptTemplate
1211
1209
 
1212
- .. code-block:: python
1213
-
1214
- from langchain_core.prompts import ChatPromptTemplate
1215
-
1216
- template = ChatPromptTemplate.from_messages(
1217
- [
1218
- ("system", "You are an AI assistant named {name}."),
1219
- ("human", "Hi I'm {user}"),
1220
- ("ai", "Hi there, {user}, I'm {name}."),
1221
- ("human", "{input}"),
1222
- ]
1223
- )
1224
- template2 = template.partial(user="Lucy", name="R2D2")
1225
-
1226
- template2.format_messages(input="hello")
1210
+ template = ChatPromptTemplate.from_messages(
1211
+ [
1212
+ ("system", "You are an AI assistant named {name}."),
1213
+ ("human", "Hi I'm {user}"),
1214
+ ("ai", "Hi there, {user}, I'm {name}."),
1215
+ ("human", "{input}"),
1216
+ ]
1217
+ )
1218
+ template2 = template.partial(user="Lucy", name="R2D2")
1227
1219
 
1220
+ template2.format_messages(input="hello")
1221
+ ```
1228
1222
  """
1229
1223
  prompt_dict = self.__dict__.copy()
1230
1224
  prompt_dict["input_variables"] = list(
@@ -1262,7 +1256,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
1262
1256
 
1263
1257
  Returns:
1264
1258
  If index is an int, returns the message at that index.
1265
- If index is a slice, returns a new ``ChatPromptTemplate``
1259
+ If index is a slice, returns a new `ChatPromptTemplate`
1266
1260
  containing the messages in that slice.
1267
1261
  """
1268
1262
  if isinstance(index, slice):
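
The indexing behavior documented in this hunk amounts to list-style access over the template's messages; a minimal sketch (not part of the diff):

```python
from langchain_core.prompts import ChatPromptTemplate

template = ChatPromptTemplate(
    [
        ("system", "You are a helpful assistant."),
        ("human", "Hi!"),
        ("ai", "Hello! How can I help?"),
        ("human", "{question}"),
    ]
)

first = template[0]   # int index -> the message template at that position
tail = template[1:]   # slice -> a new ChatPromptTemplate without the system message
```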
@@ -1293,7 +1287,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.

langchain_core/prompts/dict.py

@@ -77,7 +77,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
  """Get the namespace of the langchain object.

  Returns:
- ``["langchain_core", "prompts", "dict"]``
+ `["langchain_core", "prompts", "dict"]`
  """
  return ["langchain_core", "prompts", "dict"]

@@ -85,7 +85,7 @@ class DictPromptTemplate(RunnableSerializable[dict, dict]):
  """Human-readable representation.

  Args:
- html: Whether to format as HTML. Defaults to False.
+ html: Whether to format as HTML. Defaults to `False`.

  Returns:
  Human-readable representation.