langchain-core 1.0.0a6__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165) hide show
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +51 -64
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +55 -48
  8. langchain_core/caches.py +65 -66
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +321 -336
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +454 -514
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +53 -68
  17. langchain_core/document_loaders/base.py +27 -25
  18. langchain_core/document_loaders/blob_loaders.py +1 -1
  19. langchain_core/document_loaders/langsmith.py +44 -48
  20. langchain_core/documents/__init__.py +23 -3
  21. langchain_core/documents/base.py +102 -94
  22. langchain_core/documents/compressor.py +10 -10
  23. langchain_core/documents/transformers.py +34 -35
  24. langchain_core/embeddings/fake.py +50 -54
  25. langchain_core/example_selectors/length_based.py +2 -2
  26. langchain_core/example_selectors/semantic_similarity.py +28 -32
  27. langchain_core/exceptions.py +21 -20
  28. langchain_core/globals.py +3 -151
  29. langchain_core/indexing/__init__.py +1 -1
  30. langchain_core/indexing/api.py +121 -126
  31. langchain_core/indexing/base.py +73 -75
  32. langchain_core/indexing/in_memory.py +4 -6
  33. langchain_core/language_models/__init__.py +14 -29
  34. langchain_core/language_models/_utils.py +58 -61
  35. langchain_core/language_models/base.py +82 -172
  36. langchain_core/language_models/chat_models.py +329 -402
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +42 -36
  39. langchain_core/language_models/llms.py +189 -269
  40. langchain_core/load/dump.py +9 -12
  41. langchain_core/load/load.py +18 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +42 -40
  44. langchain_core/messages/__init__.py +10 -16
  45. langchain_core/messages/ai.py +148 -148
  46. langchain_core/messages/base.py +53 -51
  47. langchain_core/messages/block_translators/__init__.py +19 -22
  48. langchain_core/messages/block_translators/anthropic.py +6 -6
  49. langchain_core/messages/block_translators/bedrock_converse.py +5 -5
  50. langchain_core/messages/block_translators/google_genai.py +10 -7
  51. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  52. langchain_core/messages/block_translators/groq.py +117 -21
  53. langchain_core/messages/block_translators/langchain_v0.py +5 -5
  54. langchain_core/messages/block_translators/openai.py +11 -11
  55. langchain_core/messages/chat.py +2 -6
  56. langchain_core/messages/content.py +339 -330
  57. langchain_core/messages/function.py +6 -10
  58. langchain_core/messages/human.py +24 -31
  59. langchain_core/messages/modifier.py +2 -2
  60. langchain_core/messages/system.py +19 -29
  61. langchain_core/messages/tool.py +74 -90
  62. langchain_core/messages/utils.py +484 -510
  63. langchain_core/output_parsers/__init__.py +13 -10
  64. langchain_core/output_parsers/base.py +61 -61
  65. langchain_core/output_parsers/format_instructions.py +9 -4
  66. langchain_core/output_parsers/json.py +12 -10
  67. langchain_core/output_parsers/list.py +21 -23
  68. langchain_core/output_parsers/openai_functions.py +49 -47
  69. langchain_core/output_parsers/openai_tools.py +30 -23
  70. langchain_core/output_parsers/pydantic.py +13 -14
  71. langchain_core/output_parsers/string.py +5 -5
  72. langchain_core/output_parsers/transform.py +15 -17
  73. langchain_core/output_parsers/xml.py +35 -34
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +18 -18
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +16 -16
  78. langchain_core/outputs/llm_result.py +10 -10
  79. langchain_core/prompt_values.py +13 -19
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +81 -86
  82. langchain_core/prompts/chat.py +308 -351
  83. langchain_core/prompts/dict.py +6 -6
  84. langchain_core/prompts/few_shot.py +81 -88
  85. langchain_core/prompts/few_shot_with_templates.py +11 -13
  86. langchain_core/prompts/image.py +12 -14
  87. langchain_core/prompts/loading.py +4 -6
  88. langchain_core/prompts/message.py +7 -7
  89. langchain_core/prompts/prompt.py +24 -39
  90. langchain_core/prompts/string.py +26 -10
  91. langchain_core/prompts/structured.py +49 -53
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +61 -198
  94. langchain_core/runnables/base.py +1551 -1656
  95. langchain_core/runnables/branch.py +68 -70
  96. langchain_core/runnables/config.py +72 -89
  97. langchain_core/runnables/configurable.py +145 -161
  98. langchain_core/runnables/fallbacks.py +102 -96
  99. langchain_core/runnables/graph.py +91 -97
  100. langchain_core/runnables/graph_ascii.py +27 -28
  101. langchain_core/runnables/graph_mermaid.py +42 -51
  102. langchain_core/runnables/graph_png.py +43 -16
  103. langchain_core/runnables/history.py +175 -177
  104. langchain_core/runnables/passthrough.py +151 -167
  105. langchain_core/runnables/retry.py +46 -51
  106. langchain_core/runnables/router.py +30 -35
  107. langchain_core/runnables/schema.py +75 -80
  108. langchain_core/runnables/utils.py +60 -67
  109. langchain_core/stores.py +85 -121
  110. langchain_core/structured_query.py +8 -8
  111. langchain_core/sys_info.py +29 -29
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +306 -245
  114. langchain_core/tools/convert.py +160 -155
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -11
  117. langchain_core/tools/simple.py +19 -24
  118. langchain_core/tools/structured.py +32 -39
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/base.py +97 -99
  121. langchain_core/tracers/context.py +29 -52
  122. langchain_core/tracers/core.py +49 -53
  123. langchain_core/tracers/evaluation.py +11 -11
  124. langchain_core/tracers/event_stream.py +65 -64
  125. langchain_core/tracers/langchain.py +21 -21
  126. langchain_core/tracers/log_stream.py +45 -45
  127. langchain_core/tracers/memory_stream.py +3 -3
  128. langchain_core/tracers/root_listeners.py +16 -16
  129. langchain_core/tracers/run_collector.py +2 -4
  130. langchain_core/tracers/schemas.py +0 -129
  131. langchain_core/tracers/stdout.py +3 -3
  132. langchain_core/utils/__init__.py +1 -4
  133. langchain_core/utils/_merge.py +2 -2
  134. langchain_core/utils/aiter.py +57 -61
  135. langchain_core/utils/env.py +9 -9
  136. langchain_core/utils/function_calling.py +94 -188
  137. langchain_core/utils/html.py +7 -8
  138. langchain_core/utils/input.py +9 -6
  139. langchain_core/utils/interactive_env.py +1 -1
  140. langchain_core/utils/iter.py +36 -40
  141. langchain_core/utils/json.py +4 -3
  142. langchain_core/utils/json_schema.py +9 -9
  143. langchain_core/utils/mustache.py +8 -10
  144. langchain_core/utils/pydantic.py +35 -37
  145. langchain_core/utils/strings.py +6 -9
  146. langchain_core/utils/usage.py +1 -1
  147. langchain_core/utils/utils.py +66 -62
  148. langchain_core/vectorstores/base.py +182 -216
  149. langchain_core/vectorstores/in_memory.py +101 -176
  150. langchain_core/vectorstores/utils.py +5 -5
  151. langchain_core/version.py +1 -1
  152. langchain_core-1.0.4.dist-info/METADATA +69 -0
  153. langchain_core-1.0.4.dist-info/RECORD +172 -0
  154. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.4.dist-info}/WHEEL +1 -1
  155. langchain_core/memory.py +0 -120
  156. langchain_core/messages/block_translators/ollama.py +0 -47
  157. langchain_core/prompts/pipeline.py +0 -138
  158. langchain_core/pydantic_v1/__init__.py +0 -30
  159. langchain_core/pydantic_v1/dataclasses.py +0 -23
  160. langchain_core/pydantic_v1/main.py +0 -23
  161. langchain_core/tracers/langchain_v1.py +0 -31
  162. langchain_core/utils/loading.py +0 -35
  163. langchain_core-1.0.0a6.dist-info/METADATA +0 -67
  164. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  165. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
@@ -5,7 +5,7 @@ import re
5
5
  import xml
6
6
  import xml.etree.ElementTree as ET
7
7
  from collections.abc import AsyncIterator, Iterator
8
- from typing import Any, Literal, Optional, Union
8
+ from typing import Any, Literal
9
9
  from xml.etree.ElementTree import TreeBuilder
10
10
 
11
11
  from typing_extensions import override
@@ -43,19 +43,19 @@ class _StreamingParser:
43
43
  """Streaming parser for XML.
44
44
 
45
45
  This implementation is pulled into a class to avoid implementation
46
- drift between transform and atransform of the XMLOutputParser.
46
+ drift between transform and atransform of the `XMLOutputParser`.
47
47
  """
48
48
 
49
49
  def __init__(self, parser: Literal["defusedxml", "xml"]) -> None:
50
50
  """Initialize the streaming parser.
51
51
 
52
52
  Args:
53
- parser: Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'.
54
- See documentation in XMLOutputParser for more information.
53
+ parser: Parser to use for XML parsing. Can be either `'defusedxml'` or
54
+ `'xml'`. See documentation in `XMLOutputParser` for more information.
55
55
 
56
56
  Raises:
57
- ImportError: If defusedxml is not installed and the defusedxml
58
- parser is requested.
57
+ ImportError: If `defusedxml` is not installed and the `defusedxml` parser is
58
+ requested.
59
59
  """
60
60
  if parser == "defusedxml":
61
61
  if not _HAS_DEFUSEDXML:
@@ -75,14 +75,14 @@ class _StreamingParser:
75
75
  self.buffer = ""
76
76
  self.xml_started = False
77
77
 
78
- def parse(self, chunk: Union[str, BaseMessage]) -> Iterator[AddableDict]:
78
+ def parse(self, chunk: str | BaseMessage) -> Iterator[AddableDict]:
79
79
  """Parse a chunk of text.
80
80
 
81
81
  Args:
82
- chunk: A chunk of text to parse. This can be a string or a BaseMessage.
82
+ chunk: A chunk of text to parse. This can be a `str` or a `BaseMessage`.
83
83
 
84
84
  Yields:
85
- AddableDict: A dictionary representing the parsed XML element.
85
+ A `dict` representing the parsed XML element.
86
86
 
87
87
  Raises:
88
88
  xml.etree.ElementTree.ParseError: If the XML is not well-formed.
@@ -147,65 +147,68 @@ class _StreamingParser:
147
147
 
148
148
 
149
149
  class XMLOutputParser(BaseTransformOutputParser):
150
- """Parse an output using xml format."""
150
+ """Parse an output using xml format.
151
151
 
152
- tags: Optional[list[str]] = None
152
+ Returns a dictionary of tags.
153
+ """
154
+
155
+ tags: list[str] | None = None
153
156
  """Tags to tell the LLM to expect in the XML output.
154
157
 
155
158
  Note this may not be perfect depending on the LLM implementation.
156
159
 
157
- For example, with tags=["foo", "bar", "baz"]:
160
+ For example, with `tags=["foo", "bar", "baz"]`:
158
161
 
159
162
  1. A well-formatted XML instance:
160
- "<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"
163
+ `"<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"`
161
164
 
162
165
  2. A badly-formatted XML instance (missing closing tag for 'bar'):
163
- "<foo>\n <bar>\n </foo>"
166
+ `"<foo>\n <bar>\n </foo>"`
164
167
 
165
168
  3. A badly-formatted XML instance (unexpected 'tag' element):
166
- "<foo>\n <tag>\n </tag>\n</foo>"
169
+ `"<foo>\n <tag>\n </tag>\n</foo>"`
167
170
  """
168
171
  encoding_matcher: re.Pattern = re.compile(
169
172
  r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
170
173
  )
171
174
  parser: Literal["defusedxml", "xml"] = "defusedxml"
172
- """Parser to use for XML parsing. Can be either 'defusedxml' or 'xml'.
175
+ """Parser to use for XML parsing. Can be either `'defusedxml'` or `'xml'`.
173
176
 
174
- * 'defusedxml' is the default parser and is used to prevent XML vulnerabilities
175
- present in some distributions of Python's standard library xml.
176
- `defusedxml` is a wrapper around the standard library parser that
177
- sets up the parser with secure defaults.
178
- * 'xml' is the standard library parser.
177
+ * `'defusedxml'` is the default parser and is used to prevent XML vulnerabilities
178
+ present in some distributions of Python's standard library xml.
179
+ `defusedxml` is a wrapper around the standard library parser that
180
+ sets up the parser with secure defaults.
181
+ * `'xml'` is the standard library parser.
179
182
 
180
- Use `xml` only if you are sure that your distribution of the standard library
181
- is not vulnerable to XML vulnerabilities.
183
+ Use `xml` only if you are sure that your distribution of the standard library is not
184
+ vulnerable to XML vulnerabilities.
182
185
 
183
186
  Please review the following resources for more information:
184
187
 
185
188
  * https://docs.python.org/3/library/xml.html#xml-vulnerabilities
186
189
  * https://github.com/tiran/defusedxml
187
190
 
188
- The standard library relies on libexpat for parsing XML:
189
- https://github.com/libexpat/libexpat
191
+ The standard library relies on [`libexpat`](https://github.com/libexpat/libexpat)
192
+ for parsing XML.
190
193
  """
191
194
 
192
195
  def get_format_instructions(self) -> str:
193
196
  """Return the format instructions for the XML output."""
194
197
  return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
195
198
 
196
- def parse(self, text: str) -> dict[str, Union[str, list[Any]]]:
199
+ def parse(self, text: str) -> dict[str, str | list[Any]]:
197
200
  """Parse the output of an LLM call.
198
201
 
199
202
  Args:
200
203
  text: The output of an LLM call.
201
204
 
202
205
  Returns:
203
- A dictionary representing the parsed XML.
206
+ A `dict` representing the parsed XML.
204
207
 
205
208
  Raises:
206
209
  OutputParserException: If the XML is not well-formed.
207
- ImportError: If defusedxml is not installed and the defusedxml
208
- parser is requested.
210
+ ImportError: If `defusedxml` is not installed and the `defusedxml` parser is
211
+ requested.
209
212
  """
210
213
  # Try to find XML string within triple backticks
211
214
  # Imports are temporarily placed here to avoid issue with caching on CI
@@ -240,9 +243,7 @@ class XMLOutputParser(BaseTransformOutputParser):
240
243
  raise OutputParserException(msg, llm_output=text) from e
241
244
 
242
245
  @override
243
- def _transform(
244
- self, input: Iterator[Union[str, BaseMessage]]
245
- ) -> Iterator[AddableDict]:
246
+ def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[AddableDict]:
246
247
  streaming_parser = _StreamingParser(self.parser)
247
248
  for chunk in input:
248
249
  yield from streaming_parser.parse(chunk)
@@ -250,7 +251,7 @@ class XMLOutputParser(BaseTransformOutputParser):
250
251
 
251
252
  @override
252
253
  async def _atransform(
253
- self, input: AsyncIterator[Union[str, BaseMessage]]
254
+ self, input: AsyncIterator[str | BaseMessage]
254
255
  ) -> AsyncIterator[AddableDict]:
255
256
  streaming_parser = _StreamingParser(self.parser)
256
257
  async for chunk in input:
@@ -258,7 +259,7 @@ class XMLOutputParser(BaseTransformOutputParser):
258
259
  yield output
259
260
  streaming_parser.close()
260
261
 
261
- def _root_to_dict(self, root: ET.Element) -> dict[str, Union[str, list[Any]]]:
262
+ def _root_to_dict(self, root: ET.Element) -> dict[str, str | list[Any]]:
262
263
  """Converts xml tree to python dictionary."""
263
264
  if root.text and bool(re.search(r"\S", root.text)):
264
265
  # If root text contains any non-whitespace character it
@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.
12
12
  - LLMs will return regular text strings.
13
13
 
14
14
  In addition, users can access the raw output of either LLMs or chat models via
15
- callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
15
+ callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
16
16
  LLMResult object containing the generated outputs and any additional information
17
17
  returned by the model provider.
18
18
 
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Literal, Union
5
+ from typing import Literal
6
6
 
7
7
  from pydantic import model_validator
8
8
  from typing_extensions import Self
@@ -15,21 +15,21 @@ from langchain_core.utils._merge import merge_dicts
15
15
  class ChatGeneration(Generation):
16
16
  """A single chat generation output.
17
17
 
18
- A subclass of ``Generation`` that represents the response from a chat model
18
+ A subclass of `Generation` that represents the response from a chat model
19
19
  that generates chat messages.
20
20
 
21
- The ``message`` attribute is a structured representation of the chat message.
22
- Most of the time, the message will be of type ``AIMessage``.
21
+ The `message` attribute is a structured representation of the chat message.
22
+ Most of the time, the message will be of type `AIMessage`.
23
23
 
24
24
  Users working with chat models will usually access information via either
25
- ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
25
+ `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
26
26
  via callbacks).
27
27
  """
28
28
 
29
29
  text: str = ""
30
30
  """The text contents of the output message.
31
31
 
32
- .. warning::
32
+ !!! warning
33
33
  SHOULD NOT BE SET DIRECTLY!
34
34
 
35
35
  """
@@ -70,9 +70,9 @@ class ChatGeneration(Generation):
70
70
 
71
71
 
72
72
  class ChatGenerationChunk(ChatGeneration):
73
- """``ChatGeneration`` chunk.
73
+ """`ChatGeneration` chunk.
74
74
 
75
- ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
75
+ `ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
76
76
  """
77
77
 
78
78
  message: BaseMessageChunk
@@ -82,20 +82,20 @@ class ChatGenerationChunk(ChatGeneration):
82
82
  """Type is used exclusively for serialization purposes."""
83
83
 
84
84
  def __add__(
85
- self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
85
+ self, other: ChatGenerationChunk | list[ChatGenerationChunk]
86
86
  ) -> ChatGenerationChunk:
87
- """Concatenate two ``ChatGenerationChunk``s.
87
+ """Concatenate two `ChatGenerationChunk`s.
88
88
 
89
89
  Args:
90
- other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``
90
+ other: The other `ChatGenerationChunk` or list of `ChatGenerationChunk`
91
91
  to concatenate.
92
92
 
93
93
  Raises:
94
- TypeError: If other is not a ``ChatGenerationChunk`` or list of
95
- ``ChatGenerationChunk``.
94
+ TypeError: If other is not a `ChatGenerationChunk` or list of
95
+ `ChatGenerationChunk`.
96
96
 
97
97
  Returns:
98
- A new ``ChatGenerationChunk`` concatenated from self and other.
98
+ A new `ChatGenerationChunk` concatenated from self and other.
99
99
  """
100
100
  if isinstance(other, ChatGenerationChunk):
101
101
  generation_info = merge_dicts(
@@ -123,14 +123,14 @@ class ChatGenerationChunk(ChatGeneration):
123
123
 
124
124
  def merge_chat_generation_chunks(
125
125
  chunks: list[ChatGenerationChunk],
126
- ) -> Union[ChatGenerationChunk, None]:
127
- """Merge a list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``.
126
+ ) -> ChatGenerationChunk | None:
127
+ """Merge a list of `ChatGenerationChunk`s into a single `ChatGenerationChunk`.
128
128
 
129
129
  Args:
130
- chunks: A list of ``ChatGenerationChunk`` to merge.
130
+ chunks: A list of `ChatGenerationChunk` to merge.
131
131
 
132
132
  Returns:
133
- A merged ``ChatGenerationChunk``, or None if the input list is empty.
133
+ A merged `ChatGenerationChunk`, or None if the input list is empty.
134
134
  """
135
135
  if not chunks:
136
136
  return None
@@ -1,7 +1,5 @@
1
1
  """Chat result schema."""
2
2
 
3
- from typing import Optional
4
-
5
3
  from pydantic import BaseModel
6
4
 
7
5
  from langchain_core.outputs.chat_generation import ChatGeneration
@@ -26,7 +24,7 @@ class ChatResult(BaseModel):
26
24
  Generations is a list to allow for multiple candidate generations for a single
27
25
  input prompt.
28
26
  """
29
- llm_output: Optional[dict] = None
27
+ llm_output: dict | None = None
30
28
  """For arbitrary LLM provider specific output.
31
29
 
32
30
  This dictionary is a free-form dictionary that can contain any information that the
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Any, Literal, Optional
5
+ from typing import Any, Literal
6
6
 
7
7
  from langchain_core.load import Serializable
8
8
  from langchain_core.utils._merge import merge_dicts
@@ -11,9 +11,8 @@ from langchain_core.utils._merge import merge_dicts
11
11
  class Generation(Serializable):
12
12
  """A single text generation output.
13
13
 
14
- Generation represents the response from an
15
- `"old-fashioned" LLM <https://python.langchain.com/docs/concepts/text_llms/>__` that
16
- generates regular text (not chat messages).
14
+ Generation represents the response from an "old-fashioned" LLM (string-in,
15
+ string-out) that generates regular text (not chat messages).
17
16
 
18
17
  This model is used internally by chat model and will eventually
19
18
  be mapped to a more general `LLMResult` object, and then projected into
@@ -21,51 +20,52 @@ class Generation(Serializable):
21
20
 
22
21
  LangChain users working with chat models will usually access information via
23
22
  `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
24
- via callbacks). Please refer the `AIMessage` and `LLMResult` schema documentation
25
- for more information.
23
+ via callbacks). Please refer to `AIMessage` and `LLMResult` for more information.
26
24
  """
27
25
 
28
26
  text: str
29
27
  """Generated text output."""
30
28
 
31
- generation_info: Optional[dict[str, Any]] = None
29
+ generation_info: dict[str, Any] | None = None
32
30
  """Raw response from the provider.
33
31
 
34
32
  May include things like the reason for finishing or token log probabilities.
35
33
  """
36
34
  type: Literal["Generation"] = "Generation"
37
35
  """Type is used exclusively for serialization purposes.
38
- Set to "Generation" for this class."""
36
+
37
+ Set to "Generation" for this class.
38
+ """
39
39
 
40
40
  @classmethod
41
41
  def is_lc_serializable(cls) -> bool:
42
- """Return True as this class is serializable."""
42
+ """Return `True` as this class is serializable."""
43
43
  return True
44
44
 
45
45
  @classmethod
46
46
  def get_lc_namespace(cls) -> list[str]:
47
- """Get the namespace of the langchain object.
47
+ """Get the namespace of the LangChain object.
48
48
 
49
49
  Returns:
50
- ``["langchain", "schema", "output"]``
50
+ `["langchain", "schema", "output"]`
51
51
  """
52
52
  return ["langchain", "schema", "output"]
53
53
 
54
54
 
55
55
  class GenerationChunk(Generation):
56
- """Generation chunk, which can be concatenated with other Generation chunks."""
56
+ """`GenerationChunk`, which can be concatenated with other Generation chunks."""
57
57
 
58
58
  def __add__(self, other: GenerationChunk) -> GenerationChunk:
59
- """Concatenate two ``GenerationChunk``s.
59
+ """Concatenate two `GenerationChunk`s.
60
60
 
61
61
  Args:
62
- other: Another ``GenerationChunk`` to concatenate with.
62
+ other: Another `GenerationChunk` to concatenate with.
63
63
 
64
64
  Raises:
65
- TypeError: If other is not a ``GenerationChunk``.
65
+ TypeError: If other is not a `GenerationChunk`.
66
66
 
67
67
  Returns:
68
- A new ``GenerationChunk`` concatenated from self and other.
68
+ A new `GenerationChunk` concatenated from self and other.
69
69
  """
70
70
  if isinstance(other, GenerationChunk):
71
71
  generation_info = merge_dicts(
@@ -3,7 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from copy import deepcopy
6
- from typing import Literal, Optional, Union
6
+ from typing import Literal
7
7
 
8
8
  from pydantic import BaseModel
9
9
 
@@ -21,7 +21,7 @@ class LLMResult(BaseModel):
21
21
  """
22
22
 
23
23
  generations: list[
24
- list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
24
+ list[Generation | ChatGeneration | GenerationChunk | ChatGenerationChunk]
25
25
  ]
26
26
  """Generated outputs.
27
27
 
@@ -30,13 +30,13 @@ class LLMResult(BaseModel):
30
30
  The second dimension of the list represents different candidate generations for a
31
31
  given prompt.
32
32
 
33
- - When returned from **an LLM**, the type is ``list[list[Generation]]``.
34
- - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
33
+ - When returned from **an LLM**, the type is `list[list[Generation]]`.
34
+ - When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.
35
35
 
36
36
  ChatGeneration is a subclass of Generation that has a field for a structured chat
37
37
  message.
38
38
  """
39
- llm_output: Optional[dict] = None
39
+ llm_output: dict | None = None
40
40
  """For arbitrary LLM provider specific output.
41
41
 
42
42
  This dictionary is a free-form dictionary that can contain any information that the
@@ -45,10 +45,10 @@ class LLMResult(BaseModel):
45
45
  Users should generally avoid relying on this field and instead rely on accessing
46
46
  relevant information from standardized fields present in AIMessage.
47
47
  """
48
- run: Optional[list[RunInfo]] = None
48
+ run: list[RunInfo] | None = None
49
49
  """List of metadata info for model call for each input.
50
50
 
51
- See :class:`~langchain_core.outputs.run_info.RunInfo` for details.
51
+ See `langchain_core.outputs.run_info.RunInfo` for details.
52
52
  """
53
53
 
54
54
  type: Literal["LLMResult"] = "LLMResult"
@@ -91,13 +91,13 @@ class LLMResult(BaseModel):
91
91
  return llm_results
92
92
 
93
93
  def __eq__(self, other: object) -> bool:
94
- """Check for ``LLMResult`` equality by ignoring any metadata related to runs.
94
+ """Check for `LLMResult` equality by ignoring any metadata related to runs.
95
95
 
96
96
  Args:
97
- other: Another ``LLMResult`` object to compare against.
97
+ other: Another `LLMResult` object to compare against.
98
98
 
99
99
  Returns:
100
- True if the generations and ``llm_output`` are equal, False otherwise.
100
+ `True` if the generations and `llm_output` are equal, `False` otherwise.
101
101
  """
102
102
  if not isinstance(other, LLMResult):
103
103
  return NotImplemented
@@ -24,23 +24,21 @@ from langchain_core.messages import (
24
24
  class PromptValue(Serializable, ABC):
25
25
  """Base abstract class for inputs to any language model.
26
26
 
27
- PromptValues can be converted to both LLM (pure text-generation) inputs and
28
- ChatModel inputs.
27
+ `PromptValues` can be converted to both LLM (pure text-generation) inputs and
28
+ chat model inputs.
29
29
  """
30
30
 
31
31
  @classmethod
32
32
  def is_lc_serializable(cls) -> bool:
33
- """Return True as this class is serializable."""
33
+ """Return `True` as this class is serializable."""
34
34
  return True
35
35
 
36
36
  @classmethod
37
37
  def get_lc_namespace(cls) -> list[str]:
38
- """Get the namespace of the langchain object.
39
-
40
- This is used to determine the namespace of the object when serializing.
38
+ """Get the namespace of the LangChain object.
41
39
 
42
40
  Returns:
43
- ``["langchain", "schema", "prompt"]``
41
+ `["langchain", "schema", "prompt"]`
44
42
  """
45
43
  return ["langchain", "schema", "prompt"]
46
44
 
@@ -50,7 +48,7 @@ class PromptValue(Serializable, ABC):
50
48
 
51
49
  @abstractmethod
52
50
  def to_messages(self) -> list[BaseMessage]:
53
- """Return prompt as a list of Messages."""
51
+ """Return prompt as a list of messages."""
54
52
 
55
53
 
56
54
  class StringPromptValue(PromptValue):
@@ -62,12 +60,10 @@ class StringPromptValue(PromptValue):
62
60
 
63
61
  @classmethod
64
62
  def get_lc_namespace(cls) -> list[str]:
65
- """Get the namespace of the langchain object.
66
-
67
- This is used to determine the namespace of the object when serializing.
63
+ """Get the namespace of the LangChain object.
68
64
 
69
65
  Returns:
70
- ``["langchain", "prompts", "base"]``
66
+ `["langchain", "prompts", "base"]`
71
67
  """
72
68
  return ["langchain", "prompts", "base"]
73
69
 
@@ -99,12 +95,10 @@ class ChatPromptValue(PromptValue):
99
95
 
100
96
  @classmethod
101
97
  def get_lc_namespace(cls) -> list[str]:
102
- """Get the namespace of the langchain object.
103
-
104
- This is used to determine the namespace of the object when serializing.
98
+ """Get the namespace of the LangChain object.
105
99
 
106
100
  Returns:
107
- ``["langchain", "prompts", "chat"]``
101
+ `["langchain", "prompts", "chat"]`
108
102
  """
109
103
  return ["langchain", "prompts", "chat"]
110
104
 
@@ -113,11 +107,11 @@ class ImageURL(TypedDict, total=False):
113
107
  """Image URL."""
114
108
 
115
109
  detail: Literal["auto", "low", "high"]
116
- """Specifies the detail level of the image. Defaults to ``'auto'``.
117
- Can be ``'auto'``, ``'low'``, or ``'high'``.
110
+ """Specifies the detail level of the image.
118
111
 
119
- This follows OpenAI's Chat Completion API's image URL format.
112
+ Can be `'auto'`, `'low'`, or `'high'`.
120
113
 
114
+ This follows OpenAI's Chat Completion API's image URL format.
121
115
  """
122
116
 
123
117
  url: str
@@ -1,29 +1,8 @@
1
1
  """**Prompt** is the input to the model.
2
2
 
3
- Prompt is often constructed
4
- from multiple components and prompt values. Prompt classes and functions make constructing
5
- and working with prompts easy.
6
-
7
- **Class hierarchy:**
8
-
9
- .. code-block::
10
-
11
- BasePromptTemplate --> PipelinePromptTemplate
12
- StringPromptTemplate --> PromptTemplate
13
- FewShotPromptTemplate
14
- FewShotPromptWithTemplates
15
- BaseChatPromptTemplate --> AutoGPTPrompt
16
- ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
17
-
18
-
19
-
20
- BaseMessagePromptTemplate --> MessagesPlaceholder
21
- BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
22
- HumanMessagePromptTemplate
23
- AIMessagePromptTemplate
24
- SystemMessagePromptTemplate
25
-
26
- """ # noqa: E501
3
+ Prompt is often constructed from multiple components and prompt values. Prompt classes
4
+ and functions make constructing and working with prompts easy.
5
+ """
27
6
 
28
7
  from typing import TYPE_CHECKING
29
8
 
@@ -53,7 +32,6 @@ if TYPE_CHECKING:
53
32
  FewShotPromptWithTemplates,
54
33
  )
55
34
  from langchain_core.prompts.loading import load_prompt
56
- from langchain_core.prompts.pipeline import PipelinePromptTemplate
57
35
  from langchain_core.prompts.prompt import PromptTemplate
58
36
  from langchain_core.prompts.string import (
59
37
  StringPromptTemplate,
@@ -75,7 +53,6 @@ __all__ = (
75
53
  "FewShotPromptWithTemplates",
76
54
  "HumanMessagePromptTemplate",
77
55
  "MessagesPlaceholder",
78
- "PipelinePromptTemplate",
79
56
  "PromptTemplate",
80
57
  "StringPromptTemplate",
81
58
  "SystemMessagePromptTemplate",
@@ -104,7 +81,6 @@ _dynamic_imports = {
104
81
  "FewShotPromptTemplate": "few_shot",
105
82
  "FewShotPromptWithTemplates": "few_shot_with_templates",
106
83
  "load_prompt": "loading",
107
- "PipelinePromptTemplate": "pipeline",
108
84
  "PromptTemplate": "prompt",
109
85
  "StringPromptTemplate": "string",
110
86
  "check_valid_template": "string",