langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (131)
  1. langchain_core/_api/__init__.py +3 -3
  2. langchain_core/_api/beta_decorator.py +6 -6
  3. langchain_core/_api/deprecation.py +21 -29
  4. langchain_core/_api/path.py +3 -6
  5. langchain_core/_import_utils.py +2 -3
  6. langchain_core/agents.py +10 -11
  7. langchain_core/caches.py +7 -7
  8. langchain_core/callbacks/base.py +91 -91
  9. langchain_core/callbacks/file.py +11 -11
  10. langchain_core/callbacks/manager.py +86 -89
  11. langchain_core/callbacks/stdout.py +8 -8
  12. langchain_core/callbacks/usage.py +4 -4
  13. langchain_core/chat_history.py +5 -5
  14. langchain_core/document_loaders/base.py +2 -2
  15. langchain_core/document_loaders/langsmith.py +15 -15
  16. langchain_core/documents/base.py +16 -16
  17. langchain_core/documents/compressor.py +4 -4
  18. langchain_core/example_selectors/length_based.py +1 -1
  19. langchain_core/example_selectors/semantic_similarity.py +17 -19
  20. langchain_core/exceptions.py +3 -3
  21. langchain_core/globals.py +3 -151
  22. langchain_core/indexing/api.py +44 -43
  23. langchain_core/indexing/base.py +30 -30
  24. langchain_core/indexing/in_memory.py +3 -3
  25. langchain_core/language_models/_utils.py +5 -7
  26. langchain_core/language_models/base.py +18 -132
  27. langchain_core/language_models/chat_models.py +118 -227
  28. langchain_core/language_models/fake.py +11 -11
  29. langchain_core/language_models/fake_chat_models.py +35 -29
  30. langchain_core/language_models/llms.py +91 -201
  31. langchain_core/load/dump.py +1 -1
  32. langchain_core/load/load.py +11 -12
  33. langchain_core/load/mapping.py +2 -4
  34. langchain_core/load/serializable.py +2 -4
  35. langchain_core/messages/ai.py +17 -20
  36. langchain_core/messages/base.py +23 -25
  37. langchain_core/messages/block_translators/__init__.py +2 -5
  38. langchain_core/messages/block_translators/anthropic.py +3 -3
  39. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  40. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  41. langchain_core/messages/block_translators/openai.py +6 -6
  42. langchain_core/messages/content.py +120 -124
  43. langchain_core/messages/human.py +7 -7
  44. langchain_core/messages/system.py +7 -7
  45. langchain_core/messages/tool.py +24 -24
  46. langchain_core/messages/utils.py +67 -79
  47. langchain_core/output_parsers/base.py +12 -14
  48. langchain_core/output_parsers/json.py +4 -4
  49. langchain_core/output_parsers/list.py +3 -5
  50. langchain_core/output_parsers/openai_functions.py +3 -3
  51. langchain_core/output_parsers/openai_tools.py +3 -3
  52. langchain_core/output_parsers/pydantic.py +2 -2
  53. langchain_core/output_parsers/transform.py +13 -15
  54. langchain_core/output_parsers/xml.py +7 -9
  55. langchain_core/outputs/chat_generation.py +4 -4
  56. langchain_core/outputs/chat_result.py +1 -3
  57. langchain_core/outputs/generation.py +2 -2
  58. langchain_core/outputs/llm_result.py +5 -5
  59. langchain_core/prompts/__init__.py +1 -5
  60. langchain_core/prompts/base.py +10 -15
  61. langchain_core/prompts/chat.py +31 -82
  62. langchain_core/prompts/dict.py +2 -2
  63. langchain_core/prompts/few_shot.py +5 -5
  64. langchain_core/prompts/few_shot_with_templates.py +4 -4
  65. langchain_core/prompts/loading.py +3 -5
  66. langchain_core/prompts/prompt.py +4 -16
  67. langchain_core/prompts/string.py +2 -1
  68. langchain_core/prompts/structured.py +16 -23
  69. langchain_core/rate_limiters.py +3 -4
  70. langchain_core/retrievers.py +14 -14
  71. langchain_core/runnables/base.py +928 -1042
  72. langchain_core/runnables/branch.py +36 -40
  73. langchain_core/runnables/config.py +27 -35
  74. langchain_core/runnables/configurable.py +108 -124
  75. langchain_core/runnables/fallbacks.py +76 -72
  76. langchain_core/runnables/graph.py +39 -45
  77. langchain_core/runnables/graph_ascii.py +9 -11
  78. langchain_core/runnables/graph_mermaid.py +18 -19
  79. langchain_core/runnables/graph_png.py +8 -9
  80. langchain_core/runnables/history.py +114 -127
  81. langchain_core/runnables/passthrough.py +113 -139
  82. langchain_core/runnables/retry.py +43 -48
  83. langchain_core/runnables/router.py +23 -28
  84. langchain_core/runnables/schema.py +42 -44
  85. langchain_core/runnables/utils.py +28 -31
  86. langchain_core/stores.py +9 -13
  87. langchain_core/structured_query.py +8 -8
  88. langchain_core/tools/base.py +62 -115
  89. langchain_core/tools/convert.py +31 -35
  90. langchain_core/tools/render.py +1 -1
  91. langchain_core/tools/retriever.py +4 -4
  92. langchain_core/tools/simple.py +13 -17
  93. langchain_core/tools/structured.py +12 -15
  94. langchain_core/tracers/base.py +62 -64
  95. langchain_core/tracers/context.py +17 -35
  96. langchain_core/tracers/core.py +49 -53
  97. langchain_core/tracers/evaluation.py +11 -11
  98. langchain_core/tracers/event_stream.py +58 -60
  99. langchain_core/tracers/langchain.py +13 -13
  100. langchain_core/tracers/log_stream.py +22 -24
  101. langchain_core/tracers/root_listeners.py +14 -14
  102. langchain_core/tracers/run_collector.py +2 -4
  103. langchain_core/tracers/schemas.py +8 -8
  104. langchain_core/tracers/stdout.py +2 -1
  105. langchain_core/utils/__init__.py +0 -3
  106. langchain_core/utils/_merge.py +2 -2
  107. langchain_core/utils/aiter.py +24 -28
  108. langchain_core/utils/env.py +4 -4
  109. langchain_core/utils/function_calling.py +31 -41
  110. langchain_core/utils/html.py +3 -4
  111. langchain_core/utils/input.py +3 -3
  112. langchain_core/utils/iter.py +15 -19
  113. langchain_core/utils/json.py +3 -2
  114. langchain_core/utils/json_schema.py +6 -6
  115. langchain_core/utils/mustache.py +3 -5
  116. langchain_core/utils/pydantic.py +16 -18
  117. langchain_core/utils/usage.py +1 -1
  118. langchain_core/utils/utils.py +29 -29
  119. langchain_core/vectorstores/base.py +18 -21
  120. langchain_core/vectorstores/in_memory.py +14 -87
  121. langchain_core/vectorstores/utils.py +2 -2
  122. langchain_core/version.py +1 -1
  123. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/METADATA +10 -21
  124. langchain_core-1.0.0a8.dist-info/RECORD +176 -0
  125. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/WHEEL +1 -1
  126. langchain_core/messages/block_translators/ollama.py +0 -47
  127. langchain_core/prompts/pipeline.py +0 -138
  128. langchain_core/tracers/langchain_v1.py +0 -31
  129. langchain_core/utils/loading.py +0 -35
  130. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  131. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
langchain_core/output_parsers/openai_tools.py

@@ -4,7 +4,7 @@ import copy
 import json
 import logging
 from json import JSONDecodeError
-from typing import Annotated, Any, Optional
+from typing import Annotated, Any

 from pydantic import SkipValidation, ValidationError

@@ -26,7 +26,7 @@ def parse_tool_call(
     partial: bool = False,
     strict: bool = False,
     return_id: bool = True,
-) -> Optional[dict[str, Any]]:
+) -> dict[str, Any] | None:
     """Parse a single tool call.

     Args:
@@ -75,7 +75,7 @@ def parse_tool_call(

 def make_invalid_tool_call(
     raw_tool_call: dict[str, Any],
-    error_msg: Optional[str],
+    error_msg: str | None,
 ) -> InvalidToolCall:
     """Create an InvalidToolCall from a raw tool call.

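Nearly every hunk in this diff is the same mechanical migration: `Optional[X]` becomes `X | None` and `Union[X, Y]` becomes `X | Y`, following PEP 604. The two spellings are interchangeable at runtime; a standalone sketch (not from the package) to illustrate:

```python
from typing import Optional, Union

# PEP 604 pipe unions compare equal to the typing-module spellings.
assert (str | None) == Optional[str]
assert (int | str) == Union[int, str]

# On Python 3.10+ the pipe form also works directly with isinstance().
assert isinstance(None, str | None)
assert isinstance(3, int | str)
```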
langchain_core/output_parsers/pydantic.py

@@ -1,7 +1,7 @@
 """Output parsers using Pydantic."""

 import json
-from typing import Annotated, Generic, Optional
+from typing import Annotated, Generic

 import pydantic
 from pydantic import SkipValidation
@@ -44,7 +44,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):

     def parse_result(
         self, result: list[Generation], *, partial: bool = False
-    ) -> Optional[TBaseModel]:
+    ) -> TBaseModel | None:
         """Parse the result of an LLM call to a pydantic object.

         Args:
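The `TBaseModel | None` return type reflects that `parse_result` with `partial=True` returns `None` for a payload that does not yet validate, instead of raising. A minimal usage sketch; the `Joke` model is illustrative:

```python
from pydantic import BaseModel

from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.outputs import Generation


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)

# A complete payload parses to a Joke instance.
joke = parser.parse('{"setup": "Why?", "punchline": "Because."}')
print(joke.punchline)  # Because.

# With partial=True, an incomplete payload yields None instead of raising.
assert parser.parse_result([Generation(text='{"setup": "Why?"')], partial=True) is None
```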
langchain_core/output_parsers/transform.py

@@ -5,8 +5,6 @@ from __future__ import annotations
 from typing import (
     TYPE_CHECKING,
     Any,
-    Optional,
-    Union,
 )

 from typing_extensions import override
@@ -32,7 +30,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):

     def _transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],
+        input: Iterator[str | BaseMessage],
     ) -> Iterator[T]:
         for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -42,7 +40,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):

     async def _atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],
+        input: AsyncIterator[str | BaseMessage],
     ) -> AsyncIterator[T]:
         async for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -57,8 +55,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
     @override
     def transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],
-        config: Optional[RunnableConfig] = None,
+        input: Iterator[str | BaseMessage],
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> Iterator[T]:
         """Transform the input into the output format.
@@ -78,8 +76,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
     @override
     async def atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],
-        config: Optional[RunnableConfig] = None,
+        input: AsyncIterator[str | BaseMessage],
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[T]:
         """Async transform the input into the output format.
langchain_core/output_parsers/transform.py (continued)

@@ -108,7 +106,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):

     def _diff(
         self,
-        prev: Optional[T],
+        prev: T | None,
         next: T,  # noqa: A002
     ) -> T:
         """Convert parsed outputs into a diff format.
@@ -125,11 +123,11 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
         raise NotImplementedError

     @override
-    def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]:
+    def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[Any]:
         prev_parsed = None
-        acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
+        acc_gen: GenerationChunk | ChatGenerationChunk | None = None
         for chunk in input:
-            chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
+            chunk_gen: GenerationChunk | ChatGenerationChunk
             if isinstance(chunk, BaseMessageChunk):
                 chunk_gen = ChatGenerationChunk(message=chunk)
             elif isinstance(chunk, BaseMessage):
@@ -151,12 +149,12 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):

     @override
     async def _atransform(
-        self, input: AsyncIterator[Union[str, BaseMessage]]
+        self, input: AsyncIterator[str | BaseMessage]
     ) -> AsyncIterator[T]:
         prev_parsed = None
-        acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
+        acc_gen: GenerationChunk | ChatGenerationChunk | None = None
         async for chunk in input:
-            chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
+            chunk_gen: GenerationChunk | ChatGenerationChunk
             if isinstance(chunk, BaseMessageChunk):
                 chunk_gen = ChatGenerationChunk(message=chunk)
             elif isinstance(chunk, BaseMessage):
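The cumulative variant re-parses the accumulated text on every chunk; that is what `acc_gen` is building up above. With `JsonOutputParser`, a concrete subclass, a streamed JSON payload therefore yields progressively more complete objects. A sketch; the printed states are indicative:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.output_parsers import JsonOutputParser

parser = JsonOutputParser()

# Each yielded state is the best-effort parse of everything seen so far.
chunks = [AIMessageChunk(content=c) for c in ('{"count": 1', ', "done": true}')]
for state in parser.transform(iter(chunks)):
    print(state)
# {'count': 1}
# {'count': 1, 'done': True}
```

Setting `diff=True` on the parser routes consecutive states through `_diff` instead, which for `JsonOutputParser` emits jsonpatch operations rather than full snapshots.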
langchain_core/output_parsers/xml.py

@@ -5,7 +5,7 @@ import re
 import xml
 import xml.etree.ElementTree as ET
 from collections.abc import AsyncIterator, Iterator
-from typing import Any, Literal, Optional, Union
+from typing import Any, Literal
 from xml.etree.ElementTree import TreeBuilder

 from typing_extensions import override
@@ -75,7 +75,7 @@ class _StreamingParser:
         self.buffer = ""
         self.xml_started = False

-    def parse(self, chunk: Union[str, BaseMessage]) -> Iterator[AddableDict]:
+    def parse(self, chunk: str | BaseMessage) -> Iterator[AddableDict]:
         """Parse a chunk of text.

         Args:
@@ -149,7 +149,7 @@ class _StreamingParser:
 class XMLOutputParser(BaseTransformOutputParser):
     """Parse an output using xml format."""

-    tags: Optional[list[str]] = None
+    tags: list[str] | None = None
     """Tags to tell the LLM to expect in the XML output.

     Note this may not be perfect depending on the LLM implementation.
@@ -193,7 +193,7 @@ class XMLOutputParser(BaseTransformOutputParser):
         """Return the format instructions for the XML output."""
         return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)

-    def parse(self, text: str) -> dict[str, Union[str, list[Any]]]:
+    def parse(self, text: str) -> dict[str, str | list[Any]]:
         """Parse the output of an LLM call.

         Args:
@@ -240,9 +240,7 @@ class XMLOutputParser(BaseTransformOutputParser):
             raise OutputParserException(msg, llm_output=text) from e

     @override
-    def _transform(
-        self, input: Iterator[Union[str, BaseMessage]]
-    ) -> Iterator[AddableDict]:
+    def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[AddableDict]:
         streaming_parser = _StreamingParser(self.parser)
         for chunk in input:
             yield from streaming_parser.parse(chunk)
@@ -250,7 +248,7 @@ class XMLOutputParser(BaseTransformOutputParser):

     @override
     async def _atransform(
-        self, input: AsyncIterator[Union[str, BaseMessage]]
+        self, input: AsyncIterator[str | BaseMessage]
     ) -> AsyncIterator[AddableDict]:
         streaming_parser = _StreamingParser(self.parser)
         async for chunk in input:
@@ -258,7 +256,7 @@ class XMLOutputParser(BaseTransformOutputParser):
             yield output
         streaming_parser.close()

-    def _root_to_dict(self, root: ET.Element) -> dict[str, Union[str, list[Any]]]:
+    def _root_to_dict(self, root: ET.Element) -> dict[str, str | list[Any]]:
         """Converts xml tree to python dictionary."""
         if root.text and bool(re.search(r"\S", root.text)):
             # If root text contains any non-whitespace character it
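For reference, the `dict[str, str | list[Any]]` shape that `parse` and `_root_to_dict` produce: an element with non-whitespace text maps to `{tag: text}`, an element with children maps to `{tag: [child dicts]}`. A quick sketch; note the default backend expects the `defusedxml` package to be installed:

```python
from langchain_core.output_parsers import XMLOutputParser

parser = XMLOutputParser()  # default backend is defusedxml
print(parser.parse("<reply><a>1</a><b>2</b></reply>"))
# {'reply': [{'a': '1'}, {'b': '2'}]}
```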
langchain_core/outputs/chat_generation.py

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Literal, Union
+from typing import Literal

 from pydantic import model_validator
 from typing_extensions import Self
@@ -29,7 +29,7 @@ class ChatGeneration(Generation):
     text: str = ""
     """The text contents of the output message.

-    .. warning::
+    !!! warning
         SHOULD NOT BE SET DIRECTLY!

     """
@@ -82,7 +82,7 @@ class ChatGenerationChunk(ChatGeneration):
     """Type is used exclusively for serialization purposes."""

     def __add__(
-        self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
+        self, other: ChatGenerationChunk | list[ChatGenerationChunk]
     ) -> ChatGenerationChunk:
         """Concatenate two ``ChatGenerationChunk``s.

@@ -123,7 +123,7 @@ class ChatGenerationChunk(ChatGeneration):

 def merge_chat_generation_chunks(
     chunks: list[ChatGenerationChunk],
-) -> Union[ChatGenerationChunk, None]:
+) -> ChatGenerationChunk | None:
     """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.

     Args:
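`__add__` and `merge_chat_generation_chunks` are the two ways chunks are folded together during streaming; a minimal sketch:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs.chat_generation import (
    ChatGenerationChunk,
    merge_chat_generation_chunks,
)

a = ChatGenerationChunk(message=AIMessageChunk(content="Hel"))
b = ChatGenerationChunk(message=AIMessageChunk(content="lo"))

# __add__ concatenates the underlying message chunks.
print((a + b).text)  # Hello

# merge_chat_generation_chunks folds a whole list; an empty list returns
# None, which is what the `ChatGenerationChunk | None` return type encodes.
merged = merge_chat_generation_chunks([a, b])
assert merged is not None and merged.text == "Hello"
```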
langchain_core/outputs/chat_result.py

@@ -1,7 +1,5 @@
 """Chat result schema."""

-from typing import Optional
-
 from pydantic import BaseModel

 from langchain_core.outputs.chat_generation import ChatGeneration
@@ -26,7 +24,7 @@ class ChatResult(BaseModel):
     Generations is a list to allow for multiple candidate generations for a single
     input prompt.
     """
-    llm_output: Optional[dict] = None
+    llm_output: dict | None = None
     """For arbitrary LLM provider specific output.

     This dictionary is a free-form dictionary that can contain any information that the

langchain_core/outputs/generation.py

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Any, Literal, Optional
+from typing import Any, Literal

 from langchain_core.load import Serializable
 from langchain_core.utils._merge import merge_dicts
@@ -28,7 +28,7 @@ class Generation(Serializable):
     text: str
     """Generated text output."""

-    generation_info: Optional[dict[str, Any]] = None
+    generation_info: dict[str, Any] | None = None
     """Raw response from the provider.

     May include things like the reason for finishing or token log probabilities.

langchain_core/outputs/llm_result.py

@@ -3,7 +3,7 @@
 from __future__ import annotations

 from copy import deepcopy
-from typing import Literal, Optional, Union
+from typing import Literal

 from pydantic import BaseModel

@@ -21,7 +21,7 @@ class LLMResult(BaseModel):
     """

     generations: list[
-        list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
+        list[Generation | ChatGeneration | GenerationChunk | ChatGenerationChunk]
     ]
     """Generated outputs.

@@ -36,7 +36,7 @@ class LLMResult(BaseModel):
     ChatGeneration is a subclass of Generation that has a field for a structured chat
     message.
     """
-    llm_output: Optional[dict] = None
+    llm_output: dict | None = None
     """For arbitrary LLM provider specific output.

     This dictionary is a free-form dictionary that can contain any information that the
@@ -45,10 +45,10 @@ class LLMResult(BaseModel):
     Users should generally avoid relying on this field and instead rely on accessing
     relevant information from standardized fields present in AIMessage.
     """
-    run: Optional[list[RunInfo]] = None
+    run: list[RunInfo] | None = None
     """List of metadata info for model call for each input.

-    See :class:`~langchain_core.outputs.run_info.RunInfo` for details.
+    See `langchain_core.outputs.run_info.RunInfo` for details.
     """

     type: Literal["LLMResult"] = "LLMResult"
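Alongside the union rewrites, note the docstring change above: the Sphinx `:class:` role is replaced with plain backticks, matching the `.. warning::` to `!!! warning` swap earlier, so the docstrings render under MkDocs-style tooling rather than Sphinx. Construction of `LLMResult` is unchanged; `llm_output` and `run` both default to `None` and can simply be omitted. A sketch with illustrative values:

```python
from langchain_core.outputs import Generation, LLMResult

result = LLMResult(
    generations=[
        [Generation(text="Hello!", generation_info={"finish_reason": "stop"})]
    ],
    llm_output={"model_name": "example-model"},  # free-form, provider-specific
)
print(result.generations[0][0].text)  # Hello!
```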
langchain_core/prompts/__init__.py

@@ -8,8 +8,7 @@ from multiple components and prompt values. Prompt classes and functions make co

 .. code-block::

-    BasePromptTemplate --> PipelinePromptTemplate
-                           StringPromptTemplate --> PromptTemplate
+    BasePromptTemplate --> StringPromptTemplate --> PromptTemplate
                                                     FewShotPromptTemplate
                                                     FewShotPromptWithTemplates
                            BaseChatPromptTemplate --> AutoGPTPrompt
@@ -53,7 +52,6 @@ if TYPE_CHECKING:
         FewShotPromptWithTemplates,
     )
     from langchain_core.prompts.loading import load_prompt
-    from langchain_core.prompts.pipeline import PipelinePromptTemplate
     from langchain_core.prompts.prompt import PromptTemplate
     from langchain_core.prompts.string import (
         StringPromptTemplate,
@@ -75,7 +73,6 @@ __all__ = (
     "FewShotPromptWithTemplates",
     "HumanMessagePromptTemplate",
     "MessagesPlaceholder",
-    "PipelinePromptTemplate",
     "PromptTemplate",
     "StringPromptTemplate",
     "SystemMessagePromptTemplate",
@@ -104,7 +101,6 @@ _dynamic_imports = {
     "FewShotPromptTemplate": "few_shot",
     "FewShotPromptWithTemplates": "few_shot_with_templates",
     "load_prompt": "loading",
-    "PipelinePromptTemplate": "pipeline",
     "PromptTemplate": "prompt",
     "StringPromptTemplate": "string",
     "check_valid_template": "string",
langchain_core/prompts/base.py

@@ -6,17 +6,14 @@ import contextlib
 import json
 import typing
 from abc import ABC, abstractmethod
-from collections.abc import Mapping
+from collections.abc import Callable, Mapping
 from functools import cached_property
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
     Generic,
-    Optional,
     TypeVar,
-    Union,
 )

 import yaml
@@ -57,16 +54,16 @@ class BasePromptTemplate(
     input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True)  # noqa: UP006
     """A dictionary of the types of the variables the prompt template expects.
     If not provided, all variables are assumed to be strings."""
-    output_parser: Optional[BaseOutputParser] = None
+    output_parser: BaseOutputParser | None = None
     """How to parse the output of calling an LLM on this formatted prompt."""
     partial_variables: Mapping[str, Any] = Field(default_factory=dict)
     """A dictionary of the partial variables the prompt template carries.

     Partial variables populate the template so that you don't need to
     pass them in every time you call the prompt."""
-    metadata: Optional[typing.Dict[str, Any]] = None  # noqa: UP006
+    metadata: typing.Dict[str, Any] | None = None  # noqa: UP006
     """Metadata to be used for tracing."""
-    tags: Optional[list[str]] = None
+    tags: list[str] | None = None
     """Tags to be used for tracing."""

     @model_validator(mode="after")
@@ -123,12 +120,10 @@ class BasePromptTemplate(
     @override
     def OutputType(self) -> Any:
         """Return the output type of the prompt."""
-        return Union[StringPromptValue, ChatPromptValueConcrete]
+        return StringPromptValue | ChatPromptValueConcrete

     @override
-    def get_input_schema(
-        self, config: Optional[RunnableConfig] = None
-    ) -> type[BaseModel]:
+    def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
         """Get the input schema for the prompt.

         Args:
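One subtlety in the `OutputType` hunk: `StringPromptValue | ChatPromptValueConcrete` is a return value, not an annotation, so it is evaluated eagerly and relies on the `types.UnionType` support added in Python 3.10; `from __future__ import annotations` cannot defer it. A standalone illustration:

```python
import types

u = int | str               # built eagerly at runtime, not deferred
print(type(u))              # <class 'types.UnionType'>
print(isinstance("hi", u))  # True
```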
langchain_core/prompts/base.py (continued)

@@ -195,7 +190,7 @@ class BasePromptTemplate(

     @override
     def invoke(
-        self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
+        self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
     ) -> PromptValue:
         """Invoke the prompt.

@@ -221,7 +216,7 @@ class BasePromptTemplate(

     @override
     async def ainvoke(
-        self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
+        self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
     ) -> PromptValue:
         """Async invoke the prompt.

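`invoke` and `ainvoke` still take a dict of template variables and return a `PromptValue`; only the `config` annotation changes. For example:

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Tell me about {topic}.")

value = prompt.invoke({"topic": "unions"})  # config may be omitted
print(value.to_string())    # Tell me about unions.
print(value.to_messages())  # [HumanMessage(content='Tell me about unions.', ...)]
```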
langchain_core/prompts/base.py (continued)

@@ -267,7 +262,7 @@ class BasePromptTemplate(
         """
         return self.format_prompt(**kwargs)

-    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
+    def partial(self, **kwargs: str | Callable[[], str]) -> BasePromptTemplate:
         """Return a partial of the prompt template.

         Args:
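The `str | Callable[[], str]` union in `partial` covers both literal values and zero-argument callables; a callable is invoked each time the partial template is formatted, which is handy for values like timestamps:

```python
from datetime import datetime

from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Today is {date}. {question}")

# The lambda is re-evaluated on every format() call.
dated = prompt.partial(date=lambda: datetime.now().strftime("%Y-%m-%d"))
print(dated.format(question="What day is it?"))
```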
langchain_core/prompts/base.py (continued)

@@ -345,7 +340,7 @@ class BasePromptTemplate(
         prompt_dict["_type"] = self._prompt_type
         return prompt_dict

-    def save(self, file_path: Union[Path, str]) -> None:
+    def save(self, file_path: Path | str) -> None:
         """Save the prompt.

         Args:
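`save` accepts `Path | str` and infers the serialization format from the suffix (`.json` or `.yaml`); `load_prompt` is the round-trip counterpart:

```python
from pathlib import Path

from langchain_core.prompts import PromptTemplate, load_prompt

prompt = PromptTemplate.from_template("Summarize: {text}")
prompt.save(Path("prompt.yaml"))  # the suffix selects YAML vs JSON

restored = load_prompt("prompt.yaml")
assert restored.format(text="hi") == "Summarize: hi"
```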