langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (172)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +45 -70
  4. langchain_core/_api/deprecation.py +80 -80
  5. langchain_core/_api/path.py +22 -8
  6. langchain_core/_import_utils.py +10 -4
  7. langchain_core/agents.py +25 -21
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +341 -348
  11. langchain_core/callbacks/file.py +55 -44
  12. langchain_core/callbacks/manager.py +546 -683
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +35 -36
  15. langchain_core/callbacks/usage.py +65 -70
  16. langchain_core/chat_history.py +48 -55
  17. langchain_core/document_loaders/base.py +46 -21
  18. langchain_core/document_loaders/langsmith.py +39 -36
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +96 -74
  21. langchain_core/documents/compressor.py +12 -9
  22. langchain_core/documents/transformers.py +29 -28
  23. langchain_core/embeddings/fake.py +56 -57
  24. langchain_core/env.py +2 -3
  25. langchain_core/example_selectors/base.py +12 -0
  26. langchain_core/example_selectors/length_based.py +1 -1
  27. langchain_core/example_selectors/semantic_similarity.py +21 -25
  28. langchain_core/exceptions.py +15 -9
  29. langchain_core/globals.py +4 -163
  30. langchain_core/indexing/api.py +132 -125
  31. langchain_core/indexing/base.py +64 -67
  32. langchain_core/indexing/in_memory.py +26 -6
  33. langchain_core/language_models/__init__.py +15 -27
  34. langchain_core/language_models/_utils.py +267 -117
  35. langchain_core/language_models/base.py +92 -177
  36. langchain_core/language_models/chat_models.py +547 -407
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +72 -118
  39. langchain_core/language_models/llms.py +168 -242
  40. langchain_core/load/dump.py +8 -11
  41. langchain_core/load/load.py +32 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +50 -56
  44. langchain_core/messages/__init__.py +36 -51
  45. langchain_core/messages/ai.py +377 -150
  46. langchain_core/messages/base.py +239 -47
  47. langchain_core/messages/block_translators/__init__.py +111 -0
  48. langchain_core/messages/block_translators/anthropic.py +470 -0
  49. langchain_core/messages/block_translators/bedrock.py +94 -0
  50. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  51. langchain_core/messages/block_translators/google_genai.py +530 -0
  52. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  53. langchain_core/messages/block_translators/groq.py +143 -0
  54. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  55. langchain_core/messages/block_translators/openai.py +1010 -0
  56. langchain_core/messages/chat.py +2 -3
  57. langchain_core/messages/content.py +1423 -0
  58. langchain_core/messages/function.py +7 -7
  59. langchain_core/messages/human.py +44 -38
  60. langchain_core/messages/modifier.py +3 -2
  61. langchain_core/messages/system.py +40 -27
  62. langchain_core/messages/tool.py +160 -58
  63. langchain_core/messages/utils.py +527 -638
  64. langchain_core/output_parsers/__init__.py +1 -14
  65. langchain_core/output_parsers/base.py +68 -104
  66. langchain_core/output_parsers/json.py +13 -17
  67. langchain_core/output_parsers/list.py +11 -33
  68. langchain_core/output_parsers/openai_functions.py +56 -74
  69. langchain_core/output_parsers/openai_tools.py +68 -109
  70. langchain_core/output_parsers/pydantic.py +15 -13
  71. langchain_core/output_parsers/string.py +6 -2
  72. langchain_core/output_parsers/transform.py +17 -60
  73. langchain_core/output_parsers/xml.py +34 -44
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +26 -11
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +17 -6
  78. langchain_core/outputs/llm_result.py +15 -8
  79. langchain_core/prompt_values.py +29 -123
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -63
  82. langchain_core/prompts/chat.py +259 -288
  83. langchain_core/prompts/dict.py +19 -11
  84. langchain_core/prompts/few_shot.py +84 -90
  85. langchain_core/prompts/few_shot_with_templates.py +14 -12
  86. langchain_core/prompts/image.py +19 -14
  87. langchain_core/prompts/loading.py +6 -8
  88. langchain_core/prompts/message.py +7 -8
  89. langchain_core/prompts/prompt.py +42 -43
  90. langchain_core/prompts/string.py +37 -16
  91. langchain_core/prompts/structured.py +43 -46
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +52 -192
  94. langchain_core/runnables/base.py +1727 -1683
  95. langchain_core/runnables/branch.py +52 -73
  96. langchain_core/runnables/config.py +89 -103
  97. langchain_core/runnables/configurable.py +128 -130
  98. langchain_core/runnables/fallbacks.py +93 -82
  99. langchain_core/runnables/graph.py +127 -127
  100. langchain_core/runnables/graph_ascii.py +63 -41
  101. langchain_core/runnables/graph_mermaid.py +87 -70
  102. langchain_core/runnables/graph_png.py +31 -36
  103. langchain_core/runnables/history.py +145 -161
  104. langchain_core/runnables/passthrough.py +141 -144
  105. langchain_core/runnables/retry.py +84 -68
  106. langchain_core/runnables/router.py +33 -37
  107. langchain_core/runnables/schema.py +79 -72
  108. langchain_core/runnables/utils.py +95 -139
  109. langchain_core/stores.py +85 -131
  110. langchain_core/structured_query.py +11 -15
  111. langchain_core/sys_info.py +31 -32
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +221 -247
  114. langchain_core/tools/convert.py +144 -161
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -19
  117. langchain_core/tools/simple.py +52 -29
  118. langchain_core/tools/structured.py +56 -60
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/_streaming.py +6 -7
  121. langchain_core/tracers/base.py +103 -112
  122. langchain_core/tracers/context.py +29 -48
  123. langchain_core/tracers/core.py +142 -105
  124. langchain_core/tracers/evaluation.py +30 -34
  125. langchain_core/tracers/event_stream.py +162 -117
  126. langchain_core/tracers/langchain.py +34 -36
  127. langchain_core/tracers/log_stream.py +87 -49
  128. langchain_core/tracers/memory_stream.py +3 -3
  129. langchain_core/tracers/root_listeners.py +18 -34
  130. langchain_core/tracers/run_collector.py +8 -20
  131. langchain_core/tracers/schemas.py +0 -125
  132. langchain_core/tracers/stdout.py +3 -3
  133. langchain_core/utils/__init__.py +1 -4
  134. langchain_core/utils/_merge.py +47 -9
  135. langchain_core/utils/aiter.py +70 -66
  136. langchain_core/utils/env.py +12 -9
  137. langchain_core/utils/function_calling.py +139 -206
  138. langchain_core/utils/html.py +7 -8
  139. langchain_core/utils/input.py +6 -6
  140. langchain_core/utils/interactive_env.py +6 -2
  141. langchain_core/utils/iter.py +48 -45
  142. langchain_core/utils/json.py +14 -4
  143. langchain_core/utils/json_schema.py +159 -43
  144. langchain_core/utils/mustache.py +32 -25
  145. langchain_core/utils/pydantic.py +67 -40
  146. langchain_core/utils/strings.py +5 -5
  147. langchain_core/utils/usage.py +1 -1
  148. langchain_core/utils/utils.py +104 -62
  149. langchain_core/vectorstores/base.py +131 -179
  150. langchain_core/vectorstores/in_memory.py +113 -182
  151. langchain_core/vectorstores/utils.py +23 -17
  152. langchain_core/version.py +1 -1
  153. langchain_core-1.0.0.dist-info/METADATA +68 -0
  154. langchain_core-1.0.0.dist-info/RECORD +172 -0
  155. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  156. langchain_core/beta/__init__.py +0 -1
  157. langchain_core/beta/runnables/__init__.py +0 -1
  158. langchain_core/beta/runnables/context.py +0 -448
  159. langchain_core/memory.py +0 -116
  160. langchain_core/messages/content_blocks.py +0 -1435
  161. langchain_core/prompts/pipeline.py +0 -133
  162. langchain_core/pydantic_v1/__init__.py +0 -30
  163. langchain_core/pydantic_v1/dataclasses.py +0 -23
  164. langchain_core/pydantic_v1/main.py +0 -23
  165. langchain_core/tracers/langchain_v1.py +0 -23
  166. langchain_core/utils/loading.py +0 -31
  167. langchain_core/v1/__init__.py +0 -1
  168. langchain_core/v1/chat_models.py +0 -1047
  169. langchain_core/v1/messages.py +0 -755
  170. langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
  171. langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
  172. langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
@@ -8,85 +8,39 @@ from __future__ import annotations
8
8
 
9
9
  from abc import ABC, abstractmethod
10
10
  from collections.abc import Sequence
11
- from typing import Literal, Union, cast
11
+ from typing import Literal, cast
12
12
 
13
- from typing_extensions import TypedDict, overload
13
+ from typing_extensions import TypedDict
14
14
 
15
15
  from langchain_core.load.serializable import Serializable
16
16
  from langchain_core.messages import (
17
- AIMessage,
18
17
  AnyMessage,
19
18
  BaseMessage,
20
19
  HumanMessage,
21
- SystemMessage,
22
- ToolMessage,
23
20
  get_buffer_string,
24
21
  )
25
- from langchain_core.messages import content_blocks as types
26
- from langchain_core.v1.messages import AIMessage as AIMessageV1
27
- from langchain_core.v1.messages import HumanMessage as HumanMessageV1
28
- from langchain_core.v1.messages import MessageV1, ResponseMetadata
29
- from langchain_core.v1.messages import SystemMessage as SystemMessageV1
30
- from langchain_core.v1.messages import ToolMessage as ToolMessageV1
31
-
32
-
33
- def _convert_to_v1(message: BaseMessage) -> MessageV1:
34
- """Best-effort conversion of a V0 AIMessage to V1."""
35
- if isinstance(message.content, str):
36
- content: list[types.ContentBlock] = []
37
- if message.content:
38
- content = [{"type": "text", "text": message.content}]
39
- else:
40
- content = []
41
- for block in message.content:
42
- if isinstance(block, str):
43
- content.append({"type": "text", "text": block})
44
- elif isinstance(block, dict):
45
- content.append(cast("types.ContentBlock", block))
46
- else:
47
- pass
48
-
49
- if isinstance(message, HumanMessage):
50
- return HumanMessageV1(content=content)
51
- if isinstance(message, AIMessage):
52
- for tool_call in message.tool_calls:
53
- content.append(tool_call)
54
- return AIMessageV1(
55
- content=content,
56
- usage_metadata=message.usage_metadata,
57
- response_metadata=cast("ResponseMetadata", message.response_metadata),
58
- tool_calls=message.tool_calls,
59
- )
60
- if isinstance(message, SystemMessage):
61
- return SystemMessageV1(content=content)
62
- if isinstance(message, ToolMessage):
63
- return ToolMessageV1(
64
- tool_call_id=message.tool_call_id,
65
- content=content,
66
- artifact=message.artifact,
67
- )
68
- error_message = f"Unsupported message type: {type(message)}"
69
- raise TypeError(error_message)
70
22
 
71
23
 
72
24
  class PromptValue(Serializable, ABC):
73
25
  """Base abstract class for inputs to any language model.
74
26
 
75
- PromptValues can be converted to both LLM (pure text-generation) inputs and
76
- ChatModel inputs.
27
+ `PromptValues` can be converted to both LLM (pure text-generation) inputs and
28
+ chat model inputs.
77
29
  """
78
30
 
79
31
  @classmethod
80
32
  def is_lc_serializable(cls) -> bool:
81
- """Return whether this class is serializable. Defaults to True."""
33
+ """Return True as this class is serializable."""
82
34
  return True
83
35
 
84
36
  @classmethod
85
37
  def get_lc_namespace(cls) -> list[str]:
86
- """Get the namespace of the langchain object.
38
+ """Get the namespace of the LangChain object.
87
39
 
88
40
  This is used to determine the namespace of the object when serializing.
89
- Defaults to ["langchain", "schema", "prompt"].
41
+
42
+ Returns:
43
+ `["langchain", "schema", "prompt"]`
90
44
  """
91
45
  return ["langchain", "schema", "prompt"]
92
46
 
@@ -94,18 +48,8 @@ class PromptValue(Serializable, ABC):
94
48
  def to_string(self) -> str:
95
49
  """Return prompt value as string."""
96
50
 
97
- @overload
98
- def to_messages(
99
- self, message_version: Literal["v0"] = "v0"
100
- ) -> list[BaseMessage]: ...
101
-
102
- @overload
103
- def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
104
-
105
51
  @abstractmethod
106
- def to_messages(
107
- self, message_version: Literal["v0", "v1"] = "v0"
108
- ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
52
+ def to_messages(self) -> list[BaseMessage]:
109
53
  """Return prompt as a list of Messages."""
110
54
 
111
55
 
@@ -118,10 +62,12 @@ class StringPromptValue(PromptValue):
118
62
 
119
63
  @classmethod
120
64
  def get_lc_namespace(cls) -> list[str]:
121
- """Get the namespace of the langchain object.
65
+ """Get the namespace of the LangChain object.
122
66
 
123
67
  This is used to determine the namespace of the object when serializing.
124
- Defaults to ["langchain", "prompts", "base"].
68
+
69
+ Returns:
70
+ `["langchain", "prompts", "base"]`
125
71
  """
126
72
  return ["langchain", "prompts", "base"]
127
73
 
@@ -129,20 +75,8 @@ class StringPromptValue(PromptValue):
129
75
  """Return prompt as string."""
130
76
  return self.text
131
77
 
132
- @overload
133
- def to_messages(
134
- self, message_version: Literal["v0"] = "v0"
135
- ) -> list[BaseMessage]: ...
136
-
137
- @overload
138
- def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
139
-
140
- def to_messages(
141
- self, message_version: Literal["v0", "v1"] = "v0"
142
- ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
78
+ def to_messages(self) -> list[BaseMessage]:
143
79
  """Return prompt as messages."""
144
- if message_version == "v1":
145
- return [HumanMessageV1(content=self.text)]
146
80
  return [HumanMessage(content=self.text)]
147
81
 
148
82
 
@@ -159,32 +93,18 @@ class ChatPromptValue(PromptValue):
159
93
  """Return prompt as string."""
160
94
  return get_buffer_string(self.messages)
161
95
 
162
- @overload
163
- def to_messages(
164
- self, message_version: Literal["v0"] = "v0"
165
- ) -> list[BaseMessage]: ...
166
-
167
- @overload
168
- def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
169
-
170
- def to_messages(
171
- self, message_version: Literal["v0", "v1"] = "v0"
172
- ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
173
- """Return prompt as a list of messages.
174
-
175
- Args:
176
- message_version: The output version, either "v0" (default) or "v1".
177
- """
178
- if message_version == "v1":
179
- return [_convert_to_v1(m) for m in self.messages]
96
+ def to_messages(self) -> list[BaseMessage]:
97
+ """Return prompt as a list of messages."""
180
98
  return list(self.messages)
181
99
 
182
100
  @classmethod
183
101
  def get_lc_namespace(cls) -> list[str]:
184
- """Get the namespace of the langchain object.
102
+ """Get the namespace of the LangChain object.
185
103
 
186
104
  This is used to determine the namespace of the object when serializing.
187
- Defaults to ["langchain", "prompts", "chat"].
105
+
106
+ Returns:
107
+ `["langchain", "prompts", "chat"]`
188
108
  """
189
109
  return ["langchain", "prompts", "chat"]
190
110
 
@@ -193,8 +113,12 @@ class ImageURL(TypedDict, total=False):
193
113
  """Image URL."""
194
114
 
195
115
  detail: Literal["auto", "low", "high"]
196
- """Specifies the detail level of the image. Defaults to "auto".
197
- Can be "auto", "low", or "high"."""
116
+ """Specifies the detail level of the image.
117
+
118
+ Can be `'auto'`, `'low'`, or `'high'`.
119
+
120
+ This follows OpenAI's Chat Completion API's image URL format.
121
+ """
198
122
 
199
123
  url: str
200
124
  """Either a URL of the image or the base64 encoded image data."""
@@ -209,28 +133,10 @@ class ImagePromptValue(PromptValue):
209
133
 
210
134
  def to_string(self) -> str:
211
135
  """Return prompt (image URL) as string."""
212
- return self.image_url["url"]
213
-
214
- @overload
215
- def to_messages(
216
- self, message_version: Literal["v0"] = "v0"
217
- ) -> list[BaseMessage]: ...
218
-
219
- @overload
220
- def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ...
136
+ return self.image_url.get("url", "")
221
137
 
222
- def to_messages(
223
- self, message_version: Literal["v0", "v1"] = "v0"
224
- ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]:
138
+ def to_messages(self) -> list[BaseMessage]:
225
139
  """Return prompt (image URL) as messages."""
226
- if message_version == "v1":
227
- block: types.ImageContentBlock = {
228
- "type": "image",
229
- "url": self.image_url["url"],
230
- }
231
- if "detail" in self.image_url:
232
- block["detail"] = self.image_url["detail"]
233
- return [HumanMessageV1(content=[block])]
234
140
  return [HumanMessage(content=[cast("dict", self.image_url)])]
235
141
 
236
142
 
@@ -1,29 +1,8 @@
1
1
  """**Prompt** is the input to the model.
2
2
 
3
- Prompt is often constructed
4
- from multiple components and prompt values. Prompt classes and functions make constructing
5
- and working with prompts easy.
6
-
7
- **Class hierarchy:**
8
-
9
- .. code-block::
10
-
11
- BasePromptTemplate --> PipelinePromptTemplate
12
- StringPromptTemplate --> PromptTemplate
13
- FewShotPromptTemplate
14
- FewShotPromptWithTemplates
15
- BaseChatPromptTemplate --> AutoGPTPrompt
16
- ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
17
-
18
-
19
-
20
- BaseMessagePromptTemplate --> MessagesPlaceholder
21
- BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
22
- HumanMessagePromptTemplate
23
- AIMessagePromptTemplate
24
- SystemMessagePromptTemplate
25
-
26
- """ # noqa: E501
3
+ Prompt is often constructed from multiple components and prompt values. Prompt classes
4
+ and functions make constructing and working with prompts easy.
5
+ """
27
6
 
28
7
  from typing import TYPE_CHECKING
29
8
 
@@ -53,7 +32,6 @@ if TYPE_CHECKING:
53
32
  FewShotPromptWithTemplates,
54
33
  )
55
34
  from langchain_core.prompts.loading import load_prompt
56
- from langchain_core.prompts.pipeline import PipelinePromptTemplate
57
35
  from langchain_core.prompts.prompt import PromptTemplate
58
36
  from langchain_core.prompts.string import (
59
37
  StringPromptTemplate,
@@ -75,7 +53,6 @@ __all__ = (
75
53
  "FewShotPromptWithTemplates",
76
54
  "HumanMessagePromptTemplate",
77
55
  "MessagesPlaceholder",
78
- "PipelinePromptTemplate",
79
56
  "PromptTemplate",
80
57
  "StringPromptTemplate",
81
58
  "SystemMessagePromptTemplate",
@@ -104,7 +81,6 @@ _dynamic_imports = {
104
81
  "FewShotPromptTemplate": "few_shot",
105
82
  "FewShotPromptWithTemplates": "few_shot_with_templates",
106
83
  "load_prompt": "loading",
107
- "PipelinePromptTemplate": "pipeline",
108
84
  "PromptTemplate": "prompt",
109
85
  "StringPromptTemplate": "string",
110
86
  "check_valid_template": "string",
@@ -6,17 +6,14 @@ import contextlib
6
6
  import json
7
7
  import typing
8
8
  from abc import ABC, abstractmethod
9
- from collections.abc import Mapping
9
+ from collections.abc import Callable, Mapping
10
10
  from functools import cached_property
11
11
  from pathlib import Path
12
12
  from typing import (
13
13
  TYPE_CHECKING,
14
14
  Any,
15
- Callable,
16
15
  Generic,
17
- Optional,
18
16
  TypeVar,
19
- Union,
20
17
  )
21
18
 
22
19
  import yaml
@@ -57,16 +54,16 @@ class BasePromptTemplate(
57
54
  input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True) # noqa: UP006
58
55
  """A dictionary of the types of the variables the prompt template expects.
59
56
  If not provided, all variables are assumed to be strings."""
60
- output_parser: Optional[BaseOutputParser] = None
57
+ output_parser: BaseOutputParser | None = None
61
58
  """How to parse the output of calling an LLM on this formatted prompt."""
62
59
  partial_variables: Mapping[str, Any] = Field(default_factory=dict)
63
60
  """A dictionary of the partial variables the prompt template carries.
64
61
 
65
62
  Partial variables populate the template so that you don't need to
66
63
  pass them in every time you call the prompt."""
67
- metadata: Optional[typing.Dict[str, Any]] = None # noqa: UP006
64
+ metadata: typing.Dict[str, Any] | None = None # noqa: UP006
68
65
  """Metadata to be used for tracing."""
69
- tags: Optional[list[str]] = None
66
+ tags: list[str] | None = None
70
67
  """Tags to be used for tracing."""
71
68
 
72
69
  @model_validator(mode="after")
@@ -99,18 +96,16 @@ class BasePromptTemplate(
99
96
 
100
97
  @classmethod
101
98
  def get_lc_namespace(cls) -> list[str]:
102
- """Get the namespace of the langchain object.
99
+ """Get the namespace of the LangChain object.
103
100
 
104
- Returns ["langchain", "schema", "prompt_template"].
101
+ Returns:
102
+ `["langchain", "schema", "prompt_template"]`
105
103
  """
106
104
  return ["langchain", "schema", "prompt_template"]
107
105
 
108
106
  @classmethod
109
107
  def is_lc_serializable(cls) -> bool:
110
- """Return whether this class is serializable.
111
-
112
- Returns True.
113
- """
108
+ """Return True as this class is serializable."""
114
109
  return True
115
110
 
116
111
  model_config = ConfigDict(
@@ -125,19 +120,17 @@ class BasePromptTemplate(
125
120
  @override
126
121
  def OutputType(self) -> Any:
127
122
  """Return the output type of the prompt."""
128
- return Union[StringPromptValue, ChatPromptValueConcrete]
123
+ return StringPromptValue | ChatPromptValueConcrete
129
124
 
130
125
  @override
131
- def get_input_schema(
132
- self, config: Optional[RunnableConfig] = None
133
- ) -> type[BaseModel]:
126
+ def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
134
127
  """Get the input schema for the prompt.
135
128
 
136
129
  Args:
137
- config: RunnableConfig, configuration for the prompt.
130
+ config: configuration for the prompt.
138
131
 
139
132
  Returns:
140
- Type[BaseModel]: The input schema for the prompt.
133
+ The input schema for the prompt.
141
134
  """
142
135
  # This is correct, but pydantic typings/mypy don't think so.
143
136
  required_input_variables = {
@@ -197,7 +190,7 @@ class BasePromptTemplate(
197
190
 
198
191
  @override
199
192
  def invoke(
200
- self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
193
+ self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
201
194
  ) -> PromptValue:
202
195
  """Invoke the prompt.
203
196
 
@@ -206,13 +199,13 @@ class BasePromptTemplate(
206
199
  config: RunnableConfig, configuration for the prompt.
207
200
 
208
201
  Returns:
209
- PromptValue: The output of the prompt.
202
+ The output of the prompt.
210
203
  """
211
204
  config = ensure_config(config)
212
205
  if self.metadata:
213
206
  config["metadata"] = {**config["metadata"], **self.metadata}
214
207
  if self.tags:
215
- config["tags"] = config["tags"] + self.tags
208
+ config["tags"] += self.tags
216
209
  return self._call_with_config(
217
210
  self._format_prompt_with_error_handling,
218
211
  input,
@@ -223,7 +216,7 @@ class BasePromptTemplate(
223
216
 
224
217
  @override
225
218
  async def ainvoke(
226
- self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
219
+ self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
227
220
  ) -> PromptValue:
228
221
  """Async invoke the prompt.
229
222
 
@@ -232,7 +225,7 @@ class BasePromptTemplate(
232
225
  config: RunnableConfig, configuration for the prompt.
233
226
 
234
227
  Returns:
235
- PromptValue: The output of the prompt.
228
+ The output of the prompt.
236
229
  """
237
230
  config = ensure_config(config)
238
231
  if self.metadata:
@@ -252,31 +245,31 @@ class BasePromptTemplate(
252
245
  """Create Prompt Value.
253
246
 
254
247
  Args:
255
- kwargs: Any arguments to be passed to the prompt template.
248
+ **kwargs: Any arguments to be passed to the prompt template.
256
249
 
257
250
  Returns:
258
- PromptValue: The output of the prompt.
251
+ The output of the prompt.
259
252
  """
260
253
 
261
254
  async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
262
255
  """Async create Prompt Value.
263
256
 
264
257
  Args:
265
- kwargs: Any arguments to be passed to the prompt template.
258
+ **kwargs: Any arguments to be passed to the prompt template.
266
259
 
267
260
  Returns:
268
- PromptValue: The output of the prompt.
261
+ The output of the prompt.
269
262
  """
270
263
  return self.format_prompt(**kwargs)
271
264
 
272
- def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
265
+ def partial(self, **kwargs: str | Callable[[], str]) -> BasePromptTemplate:
273
266
  """Return a partial of the prompt template.
274
267
 
275
268
  Args:
276
- kwargs: Union[str, Callable[[], str]], partial variables to set.
269
+ **kwargs: partial variables to set.
277
270
 
278
271
  Returns:
279
- BasePromptTemplate: A partial of the prompt template.
272
+ A partial of the prompt template.
280
273
  """
281
274
  prompt_dict = self.__dict__.copy()
282
275
  prompt_dict["input_variables"] = list(
@@ -297,34 +290,30 @@ class BasePromptTemplate(
297
290
  """Format the prompt with the inputs.
298
291
 
299
292
  Args:
300
- kwargs: Any arguments to be passed to the prompt template.
293
+ **kwargs: Any arguments to be passed to the prompt template.
301
294
 
302
295
  Returns:
303
296
  A formatted string.
304
297
 
305
298
  Example:
306
-
307
- .. code-block:: python
308
-
309
- prompt.format(variable1="foo")
310
-
299
+ ```python
300
+ prompt.format(variable1="foo")
301
+ ```
311
302
  """
312
303
 
313
304
  async def aformat(self, **kwargs: Any) -> FormatOutputType:
314
305
  """Async format the prompt with the inputs.
315
306
 
316
307
  Args:
317
- kwargs: Any arguments to be passed to the prompt template.
308
+ **kwargs: Any arguments to be passed to the prompt template.
318
309
 
319
310
  Returns:
320
311
  A formatted string.
321
312
 
322
313
  Example:
323
-
324
- .. code-block:: python
325
-
326
- await prompt.aformat(variable1="foo")
327
-
314
+ ```python
315
+ await prompt.aformat(variable1="foo")
316
+ ```
328
317
  """
329
318
  return self.format(**kwargs)
330
319
 
@@ -337,20 +326,17 @@ class BasePromptTemplate(
337
326
  """Return dictionary representation of prompt.
338
327
 
339
328
  Args:
340
- kwargs: Any additional arguments to pass to the dictionary.
329
+ **kwargs: Any additional arguments to pass to the dictionary.
341
330
 
342
331
  Returns:
343
- Dict: Dictionary representation of the prompt.
344
-
345
- Raises:
346
- NotImplementedError: If the prompt type is not implemented.
332
+ Dictionary representation of the prompt.
347
333
  """
348
334
  prompt_dict = super().model_dump(**kwargs)
349
335
  with contextlib.suppress(NotImplementedError):
350
336
  prompt_dict["_type"] = self._prompt_type
351
337
  return prompt_dict
352
338
 
353
- def save(self, file_path: Union[Path, str]) -> None:
339
+ def save(self, file_path: Path | str) -> None:
354
340
  """Save the prompt.
355
341
 
356
342
  Args:
@@ -362,10 +348,9 @@ class BasePromptTemplate(
362
348
  NotImplementedError: If the prompt type is not implemented.
363
349
 
364
350
  Example:
365
- .. code-block:: python
366
-
367
- prompt.save(file_path="path/prompt.yaml")
368
-
351
+ ```python
352
+ prompt.save(file_path="path/prompt.yaml")
353
+ ```
369
354
  """
370
355
  if self.partial_variables:
371
356
  msg = "Cannot save prompt with partial variables."
@@ -384,10 +369,10 @@ class BasePromptTemplate(
384
369
  directory_path.mkdir(parents=True, exist_ok=True)
385
370
 
386
371
  if save_path.suffix == ".json":
387
- with save_path.open("w") as f:
372
+ with save_path.open("w", encoding="utf-8") as f:
388
373
  json.dump(prompt_dict, f, indent=4)
389
374
  elif save_path.suffix.endswith((".yaml", ".yml")):
390
- with save_path.open("w") as f:
375
+ with save_path.open("w", encoding="utf-8") as f:
391
376
  yaml.dump(prompt_dict, f, default_flow_style=False)
392
377
  else:
393
378
  msg = f"{save_path} must be json or yaml"
@@ -436,16 +421,16 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:
436
421
  string of the document formatted.
437
422
 
438
423
  Example:
439
- .. code-block:: python
440
-
441
- from langchain_core.documents import Document
442
- from langchain_core.prompts import PromptTemplate
424
+ ```python
425
+ from langchain_core.documents import Document
426
+ from langchain_core.prompts import PromptTemplate
443
427
 
444
- doc = Document(page_content="This is a joke", metadata={"page": "1"})
445
- prompt = PromptTemplate.from_template("Page {page}: {page_content}")
446
- format_document(doc, prompt)
447
- >>> "Page 1: This is a joke"
428
+ doc = Document(page_content="This is a joke", metadata={"page": "1"})
429
+ prompt = PromptTemplate.from_template("Page {page}: {page_content}")
430
+ format_document(doc, prompt)
431
+ >>> "Page 1: This is a joke"
448
432
 
433
+ ```
449
434
  """
450
435
  return prompt.format(**_get_document_info(doc, prompt))
451
436