langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core has been flagged as potentially problematic; see the registry listing for details.

Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +52 -65
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +19 -19
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +323 -334
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +441 -507
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +48 -63
  17. langchain_core/document_loaders/base.py +23 -23
  18. langchain_core/document_loaders/langsmith.py +37 -37
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +62 -65
  21. langchain_core/documents/compressor.py +4 -4
  22. langchain_core/documents/transformers.py +28 -29
  23. langchain_core/embeddings/fake.py +50 -54
  24. langchain_core/example_selectors/length_based.py +1 -1
  25. langchain_core/example_selectors/semantic_similarity.py +21 -25
  26. langchain_core/exceptions.py +10 -11
  27. langchain_core/globals.py +3 -151
  28. langchain_core/indexing/api.py +61 -66
  29. langchain_core/indexing/base.py +58 -58
  30. langchain_core/indexing/in_memory.py +3 -3
  31. langchain_core/language_models/__init__.py +14 -27
  32. langchain_core/language_models/_utils.py +270 -84
  33. langchain_core/language_models/base.py +55 -162
  34. langchain_core/language_models/chat_models.py +442 -402
  35. langchain_core/language_models/fake.py +11 -11
  36. langchain_core/language_models/fake_chat_models.py +61 -39
  37. langchain_core/language_models/llms.py +123 -231
  38. langchain_core/load/dump.py +4 -5
  39. langchain_core/load/load.py +18 -28
  40. langchain_core/load/mapping.py +2 -4
  41. langchain_core/load/serializable.py +39 -40
  42. langchain_core/messages/__init__.py +61 -22
  43. langchain_core/messages/ai.py +368 -163
  44. langchain_core/messages/base.py +214 -43
  45. langchain_core/messages/block_translators/__init__.py +111 -0
  46. langchain_core/messages/block_translators/anthropic.py +470 -0
  47. langchain_core/messages/block_translators/bedrock.py +94 -0
  48. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  49. langchain_core/messages/block_translators/google_genai.py +530 -0
  50. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  51. langchain_core/messages/block_translators/groq.py +143 -0
  52. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  53. langchain_core/messages/block_translators/openai.py +1010 -0
  54. langchain_core/messages/chat.py +2 -6
  55. langchain_core/messages/content.py +1423 -0
  56. langchain_core/messages/function.py +6 -10
  57. langchain_core/messages/human.py +41 -38
  58. langchain_core/messages/modifier.py +2 -2
  59. langchain_core/messages/system.py +38 -28
  60. langchain_core/messages/tool.py +96 -103
  61. langchain_core/messages/utils.py +478 -504
  62. langchain_core/output_parsers/__init__.py +1 -14
  63. langchain_core/output_parsers/base.py +58 -61
  64. langchain_core/output_parsers/json.py +7 -8
  65. langchain_core/output_parsers/list.py +5 -7
  66. langchain_core/output_parsers/openai_functions.py +49 -47
  67. langchain_core/output_parsers/openai_tools.py +14 -19
  68. langchain_core/output_parsers/pydantic.py +12 -13
  69. langchain_core/output_parsers/string.py +2 -2
  70. langchain_core/output_parsers/transform.py +15 -17
  71. langchain_core/output_parsers/xml.py +8 -10
  72. langchain_core/outputs/__init__.py +1 -1
  73. langchain_core/outputs/chat_generation.py +18 -18
  74. langchain_core/outputs/chat_result.py +1 -3
  75. langchain_core/outputs/generation.py +8 -8
  76. langchain_core/outputs/llm_result.py +10 -10
  77. langchain_core/prompt_values.py +12 -12
  78. langchain_core/prompts/__init__.py +3 -27
  79. langchain_core/prompts/base.py +45 -55
  80. langchain_core/prompts/chat.py +254 -313
  81. langchain_core/prompts/dict.py +5 -5
  82. langchain_core/prompts/few_shot.py +81 -88
  83. langchain_core/prompts/few_shot_with_templates.py +11 -13
  84. langchain_core/prompts/image.py +12 -14
  85. langchain_core/prompts/loading.py +6 -8
  86. langchain_core/prompts/message.py +3 -3
  87. langchain_core/prompts/prompt.py +24 -39
  88. langchain_core/prompts/string.py +4 -4
  89. langchain_core/prompts/structured.py +42 -50
  90. langchain_core/rate_limiters.py +51 -60
  91. langchain_core/retrievers.py +49 -190
  92. langchain_core/runnables/base.py +1484 -1709
  93. langchain_core/runnables/branch.py +45 -61
  94. langchain_core/runnables/config.py +80 -88
  95. langchain_core/runnables/configurable.py +117 -134
  96. langchain_core/runnables/fallbacks.py +83 -79
  97. langchain_core/runnables/graph.py +85 -95
  98. langchain_core/runnables/graph_ascii.py +27 -28
  99. langchain_core/runnables/graph_mermaid.py +38 -50
  100. langchain_core/runnables/graph_png.py +15 -16
  101. langchain_core/runnables/history.py +135 -148
  102. langchain_core/runnables/passthrough.py +124 -150
  103. langchain_core/runnables/retry.py +46 -51
  104. langchain_core/runnables/router.py +25 -30
  105. langchain_core/runnables/schema.py +79 -74
  106. langchain_core/runnables/utils.py +62 -68
  107. langchain_core/stores.py +81 -115
  108. langchain_core/structured_query.py +8 -8
  109. langchain_core/sys_info.py +27 -29
  110. langchain_core/tools/__init__.py +1 -14
  111. langchain_core/tools/base.py +179 -187
  112. langchain_core/tools/convert.py +131 -139
  113. langchain_core/tools/render.py +10 -10
  114. langchain_core/tools/retriever.py +11 -11
  115. langchain_core/tools/simple.py +19 -24
  116. langchain_core/tools/structured.py +30 -39
  117. langchain_core/tracers/__init__.py +1 -9
  118. langchain_core/tracers/base.py +97 -99
  119. langchain_core/tracers/context.py +29 -52
  120. langchain_core/tracers/core.py +50 -60
  121. langchain_core/tracers/evaluation.py +11 -11
  122. langchain_core/tracers/event_stream.py +115 -70
  123. langchain_core/tracers/langchain.py +21 -21
  124. langchain_core/tracers/log_stream.py +43 -43
  125. langchain_core/tracers/memory_stream.py +3 -3
  126. langchain_core/tracers/root_listeners.py +16 -16
  127. langchain_core/tracers/run_collector.py +2 -4
  128. langchain_core/tracers/schemas.py +0 -129
  129. langchain_core/tracers/stdout.py +3 -3
  130. langchain_core/utils/__init__.py +1 -4
  131. langchain_core/utils/_merge.py +46 -8
  132. langchain_core/utils/aiter.py +57 -61
  133. langchain_core/utils/env.py +9 -9
  134. langchain_core/utils/function_calling.py +89 -191
  135. langchain_core/utils/html.py +7 -8
  136. langchain_core/utils/input.py +6 -6
  137. langchain_core/utils/interactive_env.py +1 -1
  138. langchain_core/utils/iter.py +37 -42
  139. langchain_core/utils/json.py +4 -3
  140. langchain_core/utils/json_schema.py +8 -8
  141. langchain_core/utils/mustache.py +9 -11
  142. langchain_core/utils/pydantic.py +33 -35
  143. langchain_core/utils/strings.py +5 -5
  144. langchain_core/utils/usage.py +1 -1
  145. langchain_core/utils/utils.py +80 -54
  146. langchain_core/vectorstores/base.py +129 -164
  147. langchain_core/vectorstores/in_memory.py +99 -174
  148. langchain_core/vectorstores/utils.py +5 -5
  149. langchain_core/version.py +1 -1
  150. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
  151. langchain_core-1.0.0.dist-info/RECORD +172 -0
  152. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  153. langchain_core/beta/__init__.py +0 -1
  154. langchain_core/beta/runnables/__init__.py +0 -1
  155. langchain_core/beta/runnables/context.py +0 -447
  156. langchain_core/memory.py +0 -120
  157. langchain_core/messages/content_blocks.py +0 -176
  158. langchain_core/prompts/pipeline.py +0 -138
  159. langchain_core/pydantic_v1/__init__.py +0 -30
  160. langchain_core/pydantic_v1/dataclasses.py +0 -23
  161. langchain_core/pydantic_v1/main.py +0 -23
  162. langchain_core/tracers/langchain_v1.py +0 -31
  163. langchain_core/utils/loading.py +0 -35
  164. langchain_core-0.3.79.dist-info/RECORD +0 -174
  165. langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
@@ -1,7 +1,7 @@
 """Output parsers using Pydantic."""
 
 import json
-from typing import Annotated, Generic, Optional
+from typing import Annotated, Generic
 
 import pydantic
 from pydantic import SkipValidation
@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (
 
 
 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
-    """Parse an output using a pydantic model."""
+    """Parse an output using a Pydantic model."""
 
     pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
-    """The pydantic model to parse."""
+    """The Pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:
        try:
@@ -44,22 +44,21 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
44
44
 
45
45
  def parse_result(
46
46
  self, result: list[Generation], *, partial: bool = False
47
- ) -> Optional[TBaseModel]:
48
- """Parse the result of an LLM call to a pydantic object.
47
+ ) -> TBaseModel | None:
48
+ """Parse the result of an LLM call to a Pydantic object.
49
49
 
50
50
  Args:
51
51
  result: The result of the LLM call.
52
52
  partial: Whether to parse partial JSON objects.
53
- If True, the output will be a JSON object containing
53
+ If `True`, the output will be a JSON object containing
54
54
  all the keys that have been returned so far.
55
- Defaults to False.
56
55
 
57
56
  Raises:
58
- OutputParserException: If the result is not valid JSON
59
- or does not conform to the pydantic model.
57
+ `OutputParserException`: If the result is not valid JSON
58
+ or does not conform to the Pydantic model.
60
59
 
61
60
  Returns:
62
- The parsed pydantic object.
61
+ The parsed Pydantic object.
63
62
  """
64
63
  try:
65
64
  json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
70
69
  raise
71
70
 
72
71
  def parse(self, text: str) -> TBaseModel:
73
- """Parse the output of an LLM call to a pydantic object.
72
+ """Parse the output of an LLM call to a Pydantic object.
74
73
 
75
74
  Args:
76
75
  text: The output of the LLM call.
77
76
 
78
77
  Returns:
79
- The parsed pydantic object.
78
+ The parsed Pydantic object.
80
79
  """
81
80
  return super().parse(text)
82
81
 
@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
107
106
  @property
108
107
  @override
109
108
  def OutputType(self) -> type[TBaseModel]:
110
- """Return the pydantic model."""
109
+ """Return the Pydantic model."""
111
110
  return self.pydantic_object
112
111
 
113
112
 
@@ -19,10 +19,10 @@ class StrOutputParser(BaseTransformOutputParser[str]):
19
19
 
20
20
  @classmethod
21
21
  def get_lc_namespace(cls) -> list[str]:
22
- """Get the namespace of the langchain object.
22
+ """Get the namespace of the LangChain object.
23
23
 
24
24
  Returns:
25
- ``["langchain", "schema", "output_parser"]``
25
+ `["langchain", "schema", "output_parser"]`
26
26
  """
27
27
  return ["langchain", "schema", "output_parser"]
28
28
 
@@ -5,8 +5,6 @@ from __future__ import annotations
5
5
  from typing import (
6
6
  TYPE_CHECKING,
7
7
  Any,
8
- Optional,
9
- Union,
10
8
  )
11
9
 
12
10
  from typing_extensions import override
@@ -32,7 +30,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
32
30
 
33
31
  def _transform(
34
32
  self,
35
- input: Iterator[Union[str, BaseMessage]],
33
+ input: Iterator[str | BaseMessage],
36
34
  ) -> Iterator[T]:
37
35
  for chunk in input:
38
36
  if isinstance(chunk, BaseMessage):
@@ -42,7 +40,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
42
40
 
43
41
  async def _atransform(
44
42
  self,
45
- input: AsyncIterator[Union[str, BaseMessage]],
43
+ input: AsyncIterator[str | BaseMessage],
46
44
  ) -> AsyncIterator[T]:
47
45
  async for chunk in input:
48
46
  if isinstance(chunk, BaseMessage):
@@ -57,8 +55,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
57
55
  @override
58
56
  def transform(
59
57
  self,
60
- input: Iterator[Union[str, BaseMessage]],
61
- config: Optional[RunnableConfig] = None,
58
+ input: Iterator[str | BaseMessage],
59
+ config: RunnableConfig | None = None,
62
60
  **kwargs: Any,
63
61
  ) -> Iterator[T]:
64
62
  """Transform the input into the output format.
@@ -66,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
66
64
  Args:
67
65
  input: The input to transform.
68
66
  config: The configuration to use for the transformation.
69
- kwargs: Additional keyword arguments.
67
+ **kwargs: Additional keyword arguments.
70
68
 
71
69
  Yields:
72
70
  The transformed output.
@@ -78,8 +76,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
78
76
  @override
79
77
  async def atransform(
80
78
  self,
81
- input: AsyncIterator[Union[str, BaseMessage]],
82
- config: Optional[RunnableConfig] = None,
79
+ input: AsyncIterator[str | BaseMessage],
80
+ config: RunnableConfig | None = None,
83
81
  **kwargs: Any,
84
82
  ) -> AsyncIterator[T]:
85
83
  """Async transform the input into the output format.
@@ -87,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
87
85
  Args:
88
86
  input: The input to transform.
89
87
  config: The configuration to use for the transformation.
90
- kwargs: Additional keyword arguments.
88
+ **kwargs: Additional keyword arguments.
91
89
 
92
90
  Yields:
93
91
  The transformed output.
@@ -108,7 +106,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
108
106
 
109
107
  def _diff(
110
108
  self,
111
- prev: Optional[T],
109
+ prev: T | None,
112
110
  next: T, # noqa: A002
113
111
  ) -> T:
114
112
  """Convert parsed outputs into a diff format.
@@ -125,11 +123,11 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
125
123
  raise NotImplementedError
126
124
 
127
125
  @override
128
- def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]:
126
+ def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[Any]:
129
127
  prev_parsed = None
130
- acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
128
+ acc_gen: GenerationChunk | ChatGenerationChunk | None = None
131
129
  for chunk in input:
132
- chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
130
+ chunk_gen: GenerationChunk | ChatGenerationChunk
133
131
  if isinstance(chunk, BaseMessageChunk):
134
132
  chunk_gen = ChatGenerationChunk(message=chunk)
135
133
  elif isinstance(chunk, BaseMessage):
@@ -151,12 +149,12 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
151
149
 
152
150
  @override
153
151
  async def _atransform(
154
- self, input: AsyncIterator[Union[str, BaseMessage]]
152
+ self, input: AsyncIterator[str | BaseMessage]
155
153
  ) -> AsyncIterator[T]:
156
154
  prev_parsed = None
157
- acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
155
+ acc_gen: GenerationChunk | ChatGenerationChunk | None = None
158
156
  async for chunk in input:
159
- chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
157
+ chunk_gen: GenerationChunk | ChatGenerationChunk
160
158
  if isinstance(chunk, BaseMessageChunk):
161
159
  chunk_gen = ChatGenerationChunk(message=chunk)
162
160
  elif isinstance(chunk, BaseMessage):
@@ -5,7 +5,7 @@ import re
5
5
  import xml
6
6
  import xml.etree.ElementTree as ET
7
7
  from collections.abc import AsyncIterator, Iterator
8
- from typing import Any, Literal, Optional, Union
8
+ from typing import Any, Literal
9
9
  from xml.etree.ElementTree import TreeBuilder
10
10
 
11
11
  from typing_extensions import override
@@ -75,14 +75,14 @@ class _StreamingParser:
75
75
  self.buffer = ""
76
76
  self.xml_started = False
77
77
 
78
- def parse(self, chunk: Union[str, BaseMessage]) -> Iterator[AddableDict]:
78
+ def parse(self, chunk: str | BaseMessage) -> Iterator[AddableDict]:
79
79
  """Parse a chunk of text.
80
80
 
81
81
  Args:
82
82
  chunk: A chunk of text to parse. This can be a string or a BaseMessage.
83
83
 
84
84
  Yields:
85
- AddableDict: A dictionary representing the parsed XML element.
85
+ A dictionary representing the parsed XML element.
86
86
 
87
87
  Raises:
88
88
  xml.etree.ElementTree.ParseError: If the XML is not well-formed.
@@ -149,7 +149,7 @@ class _StreamingParser:
149
149
  class XMLOutputParser(BaseTransformOutputParser):
150
150
  """Parse an output using xml format."""
151
151
 
152
- tags: Optional[list[str]] = None
152
+ tags: list[str] | None = None
153
153
  """Tags to tell the LLM to expect in the XML output.
154
154
 
155
155
  Note this may not be perfect depending on the LLM implementation.
@@ -193,7 +193,7 @@ class XMLOutputParser(BaseTransformOutputParser):
193
193
  """Return the format instructions for the XML output."""
194
194
  return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
195
195
 
196
- def parse(self, text: str) -> dict[str, Union[str, list[Any]]]:
196
+ def parse(self, text: str) -> dict[str, str | list[Any]]:
197
197
  """Parse the output of an LLM call.
198
198
 
199
199
  Args:
@@ -240,9 +240,7 @@ class XMLOutputParser(BaseTransformOutputParser):
240
240
  raise OutputParserException(msg, llm_output=text) from e
241
241
 
242
242
  @override
243
- def _transform(
244
- self, input: Iterator[Union[str, BaseMessage]]
245
- ) -> Iterator[AddableDict]:
243
+ def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[AddableDict]:
246
244
  streaming_parser = _StreamingParser(self.parser)
247
245
  for chunk in input:
248
246
  yield from streaming_parser.parse(chunk)
@@ -250,7 +248,7 @@ class XMLOutputParser(BaseTransformOutputParser):
250
248
 
251
249
  @override
252
250
  async def _atransform(
253
- self, input: AsyncIterator[Union[str, BaseMessage]]
251
+ self, input: AsyncIterator[str | BaseMessage]
254
252
  ) -> AsyncIterator[AddableDict]:
255
253
  streaming_parser = _StreamingParser(self.parser)
256
254
  async for chunk in input:
@@ -258,7 +256,7 @@ class XMLOutputParser(BaseTransformOutputParser):
258
256
  yield output
259
257
  streaming_parser.close()
260
258
 
261
- def _root_to_dict(self, root: ET.Element) -> dict[str, Union[str, list[Any]]]:
259
+ def _root_to_dict(self, root: ET.Element) -> dict[str, str | list[Any]]:
262
260
  """Converts xml tree to python dictionary."""
263
261
  if root.text and bool(re.search(r"\S", root.text)):
264
262
  # If root text contains any non-whitespace character it
@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.
12
12
  - LLMs will return regular text strings.
13
13
 
14
14
  In addition, users can access the raw output of either LLMs or chat models via
15
- callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
15
+ callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
16
16
  LLMResult object containing the generated outputs and any additional information
17
17
  returned by the model provider.
18
18
 
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Literal, Union
5
+ from typing import Literal
6
6
 
7
7
  from pydantic import model_validator
8
8
  from typing_extensions import Self
@@ -15,21 +15,21 @@ from langchain_core.utils._merge import merge_dicts
15
15
  class ChatGeneration(Generation):
16
16
  """A single chat generation output.
17
17
 
18
- A subclass of ``Generation`` that represents the response from a chat model
18
+ A subclass of `Generation` that represents the response from a chat model
19
19
  that generates chat messages.
20
20
 
21
- The ``message`` attribute is a structured representation of the chat message.
22
- Most of the time, the message will be of type ``AIMessage``.
21
+ The `message` attribute is a structured representation of the chat message.
22
+ Most of the time, the message will be of type `AIMessage`.
23
23
 
24
24
  Users working with chat models will usually access information via either
25
- ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
25
+ `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
26
26
  via callbacks).
27
27
  """
28
28
 
29
29
  text: str = ""
30
30
  """The text contents of the output message.
31
31
 
32
- .. warning::
32
+ !!! warning
33
33
  SHOULD NOT BE SET DIRECTLY!
34
34
 
35
35
  """
@@ -70,9 +70,9 @@ class ChatGeneration(Generation):
70
70
 
71
71
 
72
72
  class ChatGenerationChunk(ChatGeneration):
73
- """``ChatGeneration`` chunk.
73
+ """`ChatGeneration` chunk.
74
74
 
75
- ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
75
+ `ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
76
76
  """
77
77
 
78
78
  message: BaseMessageChunk
@@ -82,20 +82,20 @@ class ChatGenerationChunk(ChatGeneration):
82
82
  """Type is used exclusively for serialization purposes."""
83
83
 
84
84
  def __add__(
85
- self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
85
+ self, other: ChatGenerationChunk | list[ChatGenerationChunk]
86
86
  ) -> ChatGenerationChunk:
87
- """Concatenate two ``ChatGenerationChunk``s.
87
+ """Concatenate two `ChatGenerationChunk`s.
88
88
 
89
89
  Args:
90
- other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
90
+ other: The other `ChatGenerationChunk` or list of `ChatGenerationChunk`
91
91
  to concatenate.
92
92
 
93
93
  Raises:
94
- TypeError: If other is not a ``ChatGenerationChunk`` or list of
95
- ``ChatGenerationChunk``.
94
+ TypeError: If other is not a `ChatGenerationChunk` or list of
95
+ `ChatGenerationChunk`.
96
96
 
97
97
  Returns:
98
- A new ``ChatGenerationChunk`` concatenated from self and other.
98
+ A new `ChatGenerationChunk` concatenated from self and other.
99
99
  """
100
100
  if isinstance(other, ChatGenerationChunk):
101
101
  generation_info = merge_dicts(
@@ -123,14 +123,14 @@ class ChatGenerationChunk(ChatGeneration):
123
123
 
124
124
  def merge_chat_generation_chunks(
125
125
  chunks: list[ChatGenerationChunk],
126
- ) -> Union[ChatGenerationChunk, None]:
127
- """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
126
+ ) -> ChatGenerationChunk | None:
127
+ """Merge a list of `ChatGenerationChunk`s into a single `ChatGenerationChunk`.
128
128
 
129
129
  Args:
130
- chunks: A list of ``ChatGenerationChunk`` to merge.
130
+ chunks: A list of `ChatGenerationChunk` to merge.
131
131
 
132
132
  Returns:
133
- A merged ``ChatGenerationChunk``, or None if the input list is empty.
133
+ A merged `ChatGenerationChunk`, or None if the input list is empty.
134
134
  """
135
135
  if not chunks:
136
136
  return None
@@ -1,7 +1,5 @@
1
1
  """Chat result schema."""
2
2
 
3
- from typing import Optional
4
-
5
3
  from pydantic import BaseModel
6
4
 
7
5
  from langchain_core.outputs.chat_generation import ChatGeneration
@@ -26,7 +24,7 @@ class ChatResult(BaseModel):
26
24
  Generations is a list to allow for multiple candidate generations for a single
27
25
  input prompt.
28
26
  """
29
- llm_output: Optional[dict] = None
27
+ llm_output: dict | None = None
30
28
  """For arbitrary LLM provider specific output.
31
29
 
32
30
  This dictionary is a free-form dictionary that can contain any information that the
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Any, Literal, Optional
5
+ from typing import Any, Literal
6
6
 
7
7
  from langchain_core.load import Serializable
8
8
  from langchain_core.utils._merge import merge_dicts
@@ -28,7 +28,7 @@ class Generation(Serializable):
28
28
  text: str
29
29
  """Generated text output."""
30
30
 
31
- generation_info: Optional[dict[str, Any]] = None
31
+ generation_info: dict[str, Any] | None = None
32
32
  """Raw response from the provider.
33
33
 
34
34
  May include things like the reason for finishing or token log probabilities.
@@ -44,10 +44,10 @@ class Generation(Serializable):
44
44
 
45
45
  @classmethod
46
46
  def get_lc_namespace(cls) -> list[str]:
47
- """Get the namespace of the langchain object.
47
+ """Get the namespace of the LangChain object.
48
48
 
49
49
  Returns:
50
- ``["langchain", "schema", "output"]``
50
+ `["langchain", "schema", "output"]`
51
51
  """
52
52
  return ["langchain", "schema", "output"]
53
53
 
@@ -56,16 +56,16 @@ class GenerationChunk(Generation):
56
56
  """Generation chunk, which can be concatenated with other Generation chunks."""
57
57
 
58
58
  def __add__(self, other: GenerationChunk) -> GenerationChunk:
59
- """Concatenate two ``GenerationChunk``s.
59
+ """Concatenate two `GenerationChunk`s.
60
60
 
61
61
  Args:
62
- other: Another ``GenerationChunk`` to concatenate with.
62
+ other: Another `GenerationChunk` to concatenate with.
63
63
 
64
64
  Raises:
65
- TypeError: If other is not a ``GenerationChunk``.
65
+ TypeError: If other is not a `GenerationChunk`.
66
66
 
67
67
  Returns:
68
- A new ``GenerationChunk`` concatenated from self and other.
68
+ A new `GenerationChunk` concatenated from self and other.
69
69
  """
70
70
  if isinstance(other, GenerationChunk):
71
71
  generation_info = merge_dicts(
@@ -3,7 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from copy import deepcopy
6
- from typing import Literal, Optional, Union
6
+ from typing import Literal
7
7
 
8
8
  from pydantic import BaseModel
9
9
 
@@ -21,7 +21,7 @@ class LLMResult(BaseModel):
21
21
  """
22
22
 
23
23
  generations: list[
24
- list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
24
+ list[Generation | ChatGeneration | GenerationChunk | ChatGenerationChunk]
25
25
  ]
26
26
  """Generated outputs.
27
27
 
@@ -30,13 +30,13 @@ class LLMResult(BaseModel):
30
30
  The second dimension of the list represents different candidate generations for a
31
31
  given prompt.
32
32
 
33
- - When returned from **an LLM**, the type is ``list[list[Generation]]``.
34
- - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
33
+ - When returned from **an LLM**, the type is `list[list[Generation]]`.
34
+ - When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.
35
35
 
36
36
  ChatGeneration is a subclass of Generation that has a field for a structured chat
37
37
  message.
38
38
  """
39
- llm_output: Optional[dict] = None
39
+ llm_output: dict | None = None
40
40
  """For arbitrary LLM provider specific output.
41
41
 
42
42
  This dictionary is a free-form dictionary that can contain any information that the
@@ -45,10 +45,10 @@ class LLMResult(BaseModel):
45
45
  Users should generally avoid relying on this field and instead rely on accessing
46
46
  relevant information from standardized fields present in AIMessage.
47
47
  """
48
- run: Optional[list[RunInfo]] = None
48
+ run: list[RunInfo] | None = None
49
49
  """List of metadata info for model call for each input.
50
50
 
51
- See :class:`~langchain_core.outputs.run_info.RunInfo` for details.
51
+ See `langchain_core.outputs.run_info.RunInfo` for details.
52
52
  """
53
53
 
54
54
  type: Literal["LLMResult"] = "LLMResult"
@@ -91,13 +91,13 @@ class LLMResult(BaseModel):
91
91
  return llm_results
92
92
 
93
93
  def __eq__(self, other: object) -> bool:
94
- """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
94
+ """Check for `LLMResult` equality by ignoring any metadata related to runs.
95
95
 
96
96
  Args:
97
- other: Another ``LLMResult`` object to compare against.
97
+ other: Another `LLMResult` object to compare against.
98
98
 
99
99
  Returns:
100
- True if the generations and ``llm_output`` are equal, False otherwise.
100
+ `True` if the generations and `llm_output` are equal, `False` otherwise.
101
101
  """
102
102
  if not isinstance(other, LLMResult):
103
103
  return NotImplemented
@@ -24,8 +24,8 @@ from langchain_core.messages import (
24
24
  class PromptValue(Serializable, ABC):
25
25
  """Base abstract class for inputs to any language model.
26
26
 
27
- PromptValues can be converted to both LLM (pure text-generation) inputs and
28
- ChatModel inputs.
27
+ `PromptValues` can be converted to both LLM (pure text-generation) inputs and
28
+ chat model inputs.
29
29
  """
30
30
 
31
31
  @classmethod
@@ -35,12 +35,12 @@ class PromptValue(Serializable, ABC):
35
35
 
36
36
  @classmethod
37
37
  def get_lc_namespace(cls) -> list[str]:
38
- """Get the namespace of the langchain object.
38
+ """Get the namespace of the LangChain object.
39
39
 
40
40
  This is used to determine the namespace of the object when serializing.
41
41
 
42
42
  Returns:
43
- ``["langchain", "schema", "prompt"]``
43
+ `["langchain", "schema", "prompt"]`
44
44
  """
45
45
  return ["langchain", "schema", "prompt"]
46
46
 
@@ -62,12 +62,12 @@ class StringPromptValue(PromptValue):
62
62
 
63
63
  @classmethod
64
64
  def get_lc_namespace(cls) -> list[str]:
65
- """Get the namespace of the langchain object.
65
+ """Get the namespace of the LangChain object.
66
66
 
67
67
  This is used to determine the namespace of the object when serializing.
68
68
 
69
69
  Returns:
70
- ``["langchain", "prompts", "base"]``
70
+ `["langchain", "prompts", "base"]`
71
71
  """
72
72
  return ["langchain", "prompts", "base"]
73
73
 
@@ -99,12 +99,12 @@ class ChatPromptValue(PromptValue):
99
99
 
100
100
  @classmethod
101
101
  def get_lc_namespace(cls) -> list[str]:
102
- """Get the namespace of the langchain object.
102
+ """Get the namespace of the LangChain object.
103
103
 
104
104
  This is used to determine the namespace of the object when serializing.
105
105
 
106
106
  Returns:
107
- ``["langchain", "prompts", "chat"]``
107
+ `["langchain", "prompts", "chat"]`
108
108
  """
109
109
  return ["langchain", "prompts", "chat"]
110
110
 
@@ -113,11 +113,11 @@ class ImageURL(TypedDict, total=False):
113
113
  """Image URL."""
114
114
 
115
115
  detail: Literal["auto", "low", "high"]
116
- """Specifies the detail level of the image. Defaults to ``'auto'``.
117
- Can be ``'auto'``, ``'low'``, or ``'high'``.
116
+ """Specifies the detail level of the image.
118
117
 
119
- This follows OpenAI's Chat Completion API's image URL format.
118
+ Can be `'auto'`, `'low'`, or `'high'`.
120
119
 
120
+ This follows OpenAI's Chat Completion API's image URL format.
121
121
  """
122
122
 
123
123
  url: str
@@ -133,7 +133,7 @@ class ImagePromptValue(PromptValue):
133
133
 
134
134
  def to_string(self) -> str:
135
135
  """Return prompt (image URL) as string."""
136
- return self.image_url["url"]
136
+ return self.image_url.get("url", "")
137
137
 
138
138
  def to_messages(self) -> list[BaseMessage]:
139
139
  """Return prompt (image URL) as messages."""
@@ -1,29 +1,8 @@
1
1
  """**Prompt** is the input to the model.
2
2
 
3
- Prompt is often constructed
4
- from multiple components and prompt values. Prompt classes and functions make constructing
5
- and working with prompts easy.
6
-
7
- **Class hierarchy:**
8
-
9
- .. code-block::
10
-
11
- BasePromptTemplate --> PipelinePromptTemplate
12
- StringPromptTemplate --> PromptTemplate
13
- FewShotPromptTemplate
14
- FewShotPromptWithTemplates
15
- BaseChatPromptTemplate --> AutoGPTPrompt
16
- ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
17
-
18
-
19
-
20
- BaseMessagePromptTemplate --> MessagesPlaceholder
21
- BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
22
- HumanMessagePromptTemplate
23
- AIMessagePromptTemplate
24
- SystemMessagePromptTemplate
25
-
26
- """ # noqa: E501
3
+ Prompt is often constructed from multiple components and prompt values. Prompt classes
4
+ and functions make constructing and working with prompts easy.
5
+ """
27
6
 
28
7
  from typing import TYPE_CHECKING
29
8
 
@@ -53,7 +32,6 @@ if TYPE_CHECKING:
53
32
  FewShotPromptWithTemplates,
54
33
  )
55
34
  from langchain_core.prompts.loading import load_prompt
56
- from langchain_core.prompts.pipeline import PipelinePromptTemplate
57
35
  from langchain_core.prompts.prompt import PromptTemplate
58
36
  from langchain_core.prompts.string import (
59
37
  StringPromptTemplate,
@@ -75,7 +53,6 @@ __all__ = (
75
53
  "FewShotPromptWithTemplates",
76
54
  "HumanMessagePromptTemplate",
77
55
  "MessagesPlaceholder",
78
- "PipelinePromptTemplate",
79
56
  "PromptTemplate",
80
57
  "StringPromptTemplate",
81
58
  "SystemMessagePromptTemplate",
@@ -104,7 +81,6 @@ _dynamic_imports = {
104
81
  "FewShotPromptTemplate": "few_shot",
105
82
  "FewShotPromptWithTemplates": "few_shot_with_templates",
106
83
  "load_prompt": "loading",
107
- "PipelinePromptTemplate": "pipeline",
108
84
  "PromptTemplate": "prompt",
109
85
  "StringPromptTemplate": "string",
110
86
  "check_valid_template": "string",