langchain-core 1.0.0a5__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +51 -64
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +20 -22
  8. langchain_core/caches.py +65 -66
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +321 -336
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +436 -513
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +53 -68
  17. langchain_core/document_loaders/base.py +27 -25
  18. langchain_core/document_loaders/blob_loaders.py +1 -1
  19. langchain_core/document_loaders/langsmith.py +44 -48
  20. langchain_core/documents/__init__.py +23 -3
  21. langchain_core/documents/base.py +98 -90
  22. langchain_core/documents/compressor.py +10 -10
  23. langchain_core/documents/transformers.py +34 -35
  24. langchain_core/embeddings/fake.py +50 -54
  25. langchain_core/example_selectors/length_based.py +1 -1
  26. langchain_core/example_selectors/semantic_similarity.py +28 -32
  27. langchain_core/exceptions.py +21 -20
  28. langchain_core/globals.py +3 -151
  29. langchain_core/indexing/__init__.py +1 -1
  30. langchain_core/indexing/api.py +121 -126
  31. langchain_core/indexing/base.py +73 -75
  32. langchain_core/indexing/in_memory.py +4 -6
  33. langchain_core/language_models/__init__.py +14 -29
  34. langchain_core/language_models/_utils.py +58 -61
  35. langchain_core/language_models/base.py +53 -162
  36. langchain_core/language_models/chat_models.py +298 -387
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +42 -36
  39. langchain_core/language_models/llms.py +125 -235
  40. langchain_core/load/dump.py +9 -12
  41. langchain_core/load/load.py +18 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +42 -40
  44. langchain_core/messages/__init__.py +10 -16
  45. langchain_core/messages/ai.py +148 -148
  46. langchain_core/messages/base.py +58 -52
  47. langchain_core/messages/block_translators/__init__.py +27 -17
  48. langchain_core/messages/block_translators/anthropic.py +6 -6
  49. langchain_core/messages/block_translators/bedrock_converse.py +5 -5
  50. langchain_core/messages/block_translators/google_genai.py +505 -20
  51. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  52. langchain_core/messages/block_translators/groq.py +117 -21
  53. langchain_core/messages/block_translators/langchain_v0.py +5 -5
  54. langchain_core/messages/block_translators/openai.py +11 -11
  55. langchain_core/messages/chat.py +2 -6
  56. langchain_core/messages/content.py +337 -328
  57. langchain_core/messages/function.py +6 -10
  58. langchain_core/messages/human.py +24 -31
  59. langchain_core/messages/modifier.py +2 -2
  60. langchain_core/messages/system.py +19 -29
  61. langchain_core/messages/tool.py +74 -90
  62. langchain_core/messages/utils.py +474 -504
  63. langchain_core/output_parsers/__init__.py +13 -10
  64. langchain_core/output_parsers/base.py +61 -61
  65. langchain_core/output_parsers/format_instructions.py +9 -4
  66. langchain_core/output_parsers/json.py +12 -10
  67. langchain_core/output_parsers/list.py +21 -23
  68. langchain_core/output_parsers/openai_functions.py +49 -47
  69. langchain_core/output_parsers/openai_tools.py +16 -21
  70. langchain_core/output_parsers/pydantic.py +13 -14
  71. langchain_core/output_parsers/string.py +5 -5
  72. langchain_core/output_parsers/transform.py +15 -17
  73. langchain_core/output_parsers/xml.py +35 -34
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +18 -18
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +10 -11
  78. langchain_core/outputs/llm_result.py +10 -10
  79. langchain_core/prompt_values.py +11 -17
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -56
  82. langchain_core/prompts/chat.py +275 -325
  83. langchain_core/prompts/dict.py +5 -5
  84. langchain_core/prompts/few_shot.py +81 -88
  85. langchain_core/prompts/few_shot_with_templates.py +11 -13
  86. langchain_core/prompts/image.py +12 -14
  87. langchain_core/prompts/loading.py +4 -6
  88. langchain_core/prompts/message.py +3 -3
  89. langchain_core/prompts/prompt.py +24 -39
  90. langchain_core/prompts/string.py +26 -10
  91. langchain_core/prompts/structured.py +49 -53
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +61 -198
  94. langchain_core/runnables/base.py +1478 -1630
  95. langchain_core/runnables/branch.py +53 -57
  96. langchain_core/runnables/config.py +72 -89
  97. langchain_core/runnables/configurable.py +120 -137
  98. langchain_core/runnables/fallbacks.py +83 -79
  99. langchain_core/runnables/graph.py +91 -97
  100. langchain_core/runnables/graph_ascii.py +27 -28
  101. langchain_core/runnables/graph_mermaid.py +38 -50
  102. langchain_core/runnables/graph_png.py +15 -16
  103. langchain_core/runnables/history.py +135 -148
  104. langchain_core/runnables/passthrough.py +124 -150
  105. langchain_core/runnables/retry.py +46 -51
  106. langchain_core/runnables/router.py +25 -30
  107. langchain_core/runnables/schema.py +75 -80
  108. langchain_core/runnables/utils.py +60 -67
  109. langchain_core/stores.py +85 -121
  110. langchain_core/structured_query.py +8 -8
  111. langchain_core/sys_info.py +27 -29
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +285 -229
  114. langchain_core/tools/convert.py +160 -155
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -11
  117. langchain_core/tools/simple.py +19 -24
  118. langchain_core/tools/structured.py +32 -39
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/base.py +97 -99
  121. langchain_core/tracers/context.py +29 -52
  122. langchain_core/tracers/core.py +49 -53
  123. langchain_core/tracers/evaluation.py +11 -11
  124. langchain_core/tracers/event_stream.py +65 -64
  125. langchain_core/tracers/langchain.py +21 -21
  126. langchain_core/tracers/log_stream.py +45 -45
  127. langchain_core/tracers/memory_stream.py +3 -3
  128. langchain_core/tracers/root_listeners.py +16 -16
  129. langchain_core/tracers/run_collector.py +2 -4
  130. langchain_core/tracers/schemas.py +0 -129
  131. langchain_core/tracers/stdout.py +3 -3
  132. langchain_core/utils/__init__.py +1 -4
  133. langchain_core/utils/_merge.py +2 -2
  134. langchain_core/utils/aiter.py +57 -61
  135. langchain_core/utils/env.py +9 -9
  136. langchain_core/utils/function_calling.py +89 -186
  137. langchain_core/utils/html.py +7 -8
  138. langchain_core/utils/input.py +6 -6
  139. langchain_core/utils/interactive_env.py +1 -1
  140. langchain_core/utils/iter.py +36 -40
  141. langchain_core/utils/json.py +4 -3
  142. langchain_core/utils/json_schema.py +9 -9
  143. langchain_core/utils/mustache.py +8 -10
  144. langchain_core/utils/pydantic.py +33 -35
  145. langchain_core/utils/strings.py +6 -9
  146. langchain_core/utils/usage.py +1 -1
  147. langchain_core/utils/utils.py +66 -62
  148. langchain_core/vectorstores/base.py +182 -216
  149. langchain_core/vectorstores/in_memory.py +101 -176
  150. langchain_core/vectorstores/utils.py +5 -5
  151. langchain_core/version.py +1 -1
  152. langchain_core-1.0.3.dist-info/METADATA +69 -0
  153. langchain_core-1.0.3.dist-info/RECORD +172 -0
  154. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.3.dist-info}/WHEEL +1 -1
  155. langchain_core/memory.py +0 -120
  156. langchain_core/messages/block_translators/ollama.py +0 -47
  157. langchain_core/prompts/pipeline.py +0 -138
  158. langchain_core/pydantic_v1/__init__.py +0 -30
  159. langchain_core/pydantic_v1/dataclasses.py +0 -23
  160. langchain_core/pydantic_v1/main.py +0 -23
  161. langchain_core/tracers/langchain_v1.py +0 -31
  162. langchain_core/utils/loading.py +0 -35
  163. langchain_core-1.0.0a5.dist-info/METADATA +0 -77
  164. langchain_core-1.0.0a5.dist-info/RECORD +0 -181
  165. langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/documents/compressor.py

@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING
 
 from pydantic import BaseModel
 
@@ -21,14 +21,14 @@ class BaseDocumentCompressor(BaseModel, ABC):
 
     This abstraction is primarily used for post-processing of retrieved documents.
 
-    Documents matching a given query are first retrieved.
+    `Document` objects matching a given query are first retrieved.
 
     Then the list of documents can be further processed.
 
     For example, one could re-rank the retrieved documents using an LLM.
 
-    .. note::
-        Users should favor using a RunnableLambda instead of sub-classing from this
+    !!! note
+        Users should favor using a `RunnableLambda` instead of sub-classing from this
         interface.
 
     """
@@ -38,14 +38,14 @@ class BaseDocumentCompressor(BaseModel, ABC):
         self,
         documents: Sequence[Document],
         query: str,
-        callbacks: Optional[Callbacks] = None,
+        callbacks: Callbacks | None = None,
     ) -> Sequence[Document]:
         """Compress retrieved documents given the query context.
 
         Args:
-            documents: The retrieved documents.
+            documents: The retrieved `Document` objects.
             query: The query context.
-            callbacks: Optional callbacks to run during compression.
+            callbacks: Optional `Callbacks` to run during compression.
 
         Returns:
             The compressed documents.
@@ -56,14 +56,14 @@ class BaseDocumentCompressor(BaseModel, ABC):
         self,
         documents: Sequence[Document],
         query: str,
-        callbacks: Optional[Callbacks] = None,
+        callbacks: Callbacks | None = None,
     ) -> Sequence[Document]:
         """Async compress retrieved documents given the query context.
 
         Args:
-            documents: The retrieved documents.
+            documents: The retrieved `Document` objects.
             query: The query context.
-            callbacks: Optional callbacks to run during compression.
+            callbacks: Optional `Callbacks` to run during compression.
 
         Returns:
             The compressed documents.
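
For context on the new `Callbacks | None` annotations: a minimal sketch of subclassing `BaseDocumentCompressor` under the 1.0.3 signatures. The `TruncateCompressor` name and its keep-first-`k` behavior are invented for illustration; only the signature comes from the diff above.

```python
from __future__ import annotations

from collections.abc import Sequence

from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.documents.compressor import BaseDocumentCompressor


class TruncateCompressor(BaseDocumentCompressor):
    """Hypothetical compressor that keeps only the first `k` documents."""

    k: int = 3

    def compress_documents(
        self,
        documents: Sequence[Document],
        query: str,
        callbacks: Callbacks | None = None,
    ) -> Sequence[Document]:
        # Ignores the query; a real compressor would score documents against it.
        return list(documents)[: self.k]
```

Only `compress_documents` needs an override; `acompress_documents` has a default async implementation.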
langchain_core/documents/transformers.py

@@ -16,39 +16,38 @@ if TYPE_CHECKING:
 class BaseDocumentTransformer(ABC):
     """Abstract base class for document transformation.
 
-    A document transformation takes a sequence of Documents and returns a
-    sequence of transformed Documents.
+    A document transformation takes a sequence of `Document` objects and returns a
+    sequence of transformed `Document` objects.
 
     Example:
-        .. code-block:: python
-
-            class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
-                embeddings: Embeddings
-                similarity_fn: Callable = cosine_similarity
-                similarity_threshold: float = 0.95
-
-                class Config:
-                    arbitrary_types_allowed = True
-
-                def transform_documents(
-                    self, documents: Sequence[Document], **kwargs: Any
-                ) -> Sequence[Document]:
-                    stateful_documents = get_stateful_documents(documents)
-                    embedded_documents = _get_embeddings_from_stateful_docs(
-                        self.embeddings, stateful_documents
-                    )
-                    included_idxs = _filter_similar_embeddings(
-                        embedded_documents,
-                        self.similarity_fn,
-                        self.similarity_threshold,
-                    )
-                    return [stateful_documents[i] for i in sorted(included_idxs)]
-
-            async def atransform_documents(
-                self, documents: Sequence[Document], **kwargs: Any
-            ) -> Sequence[Document]:
-                raise NotImplementedError
-
+        ```python
+        class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
+            embeddings: Embeddings
+            similarity_fn: Callable = cosine_similarity
+            similarity_threshold: float = 0.95
+
+            class Config:
+                arbitrary_types_allowed = True
+
+            def transform_documents(
+                self, documents: Sequence[Document], **kwargs: Any
+            ) -> Sequence[Document]:
+                stateful_documents = get_stateful_documents(documents)
+                embedded_documents = _get_embeddings_from_stateful_docs(
+                    self.embeddings, stateful_documents
+                )
+                included_idxs = _filter_similar_embeddings(
+                    embedded_documents,
+                    self.similarity_fn,
+                    self.similarity_threshold,
+                )
+                return [stateful_documents[i] for i in sorted(included_idxs)]
+
+            async def atransform_documents(
+                self, documents: Sequence[Document], **kwargs: Any
+            ) -> Sequence[Document]:
+                raise NotImplementedError
+        ```
     """
 
     @abstractmethod
@@ -58,10 +57,10 @@ class BaseDocumentTransformer(ABC):
         """Transform a list of documents.
 
         Args:
-            documents: A sequence of Documents to be transformed.
+            documents: A sequence of `Document` objects to be transformed.
 
         Returns:
-            A sequence of transformed Documents.
+            A sequence of transformed `Document` objects.
         """
 
     async def atransform_documents(
@@ -70,10 +69,10 @@ class BaseDocumentTransformer(ABC):
         """Asynchronously transform a list of documents.
 
         Args:
-            documents: A sequence of Documents to be transformed.
+            documents: A sequence of `Document` objects to be transformed.
 
         Returns:
-            A sequence of transformed Documents.
+            A sequence of transformed `Document` objects.
         """
         return await run_in_executor(
             None, self.transform_documents, documents, **kwargs
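
The tail of this hunk shows why only `transform_documents` is abstract: the async variant falls back to running the sync method in an executor via `run_in_executor`. A short sketch, with the `UppercaseTransformer` name and behavior invented:

```python
import asyncio
from collections.abc import Sequence
from typing import Any

from langchain_core.documents import Document
from langchain_core.documents.transformers import BaseDocumentTransformer


class UppercaseTransformer(BaseDocumentTransformer):
    """Hypothetical transformer that upper-cases page content."""

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return [
            Document(page_content=doc.page_content.upper(), metadata=doc.metadata)
            for doc in documents
        ]


docs = [Document(page_content="hello world")]
# The inherited async variant delegates to the sync method in an executor.
transformed = asyncio.run(UppercaseTransformer().atransform_documents(docs))
print(transformed[0].page_content)  # HELLO WORLD
```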
langchain_core/embeddings/fake.py

@@ -18,40 +18,38 @@ class FakeEmbeddings(Embeddings, BaseModel):
 
     This embedding model creates embeddings by sampling from a normal distribution.
 
-    Do not use this outside of testing, as it is not a real embedding model.
+    !!! danger "Toy model"
+        Do not use this outside of testing, as it is not a real embedding model.
 
     Instantiate:
-        .. code-block:: python
+        ```python
+        from langchain_core.embeddings import FakeEmbeddings
 
-            from langchain_core.embeddings import FakeEmbeddings
-
-            embed = FakeEmbeddings(size=100)
+        embed = FakeEmbeddings(size=100)
+        ```
 
     Embed single text:
-        .. code-block:: python
-
-            input_text = "The meaning of life is 42"
-            vector = embed.embed_query(input_text)
-            print(vector[:3])
-
-        .. code-block:: python
-
-            [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```python
+        input_text = "The meaning of life is 42"
+        vector = embed.embed_query(input_text)
+        print(vector[:3])
+        ```
+        ```python
+        [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```
 
     Embed multiple texts:
-        .. code-block:: python
-
-            input_texts = ["Document 1...", "Document 2..."]
-            vectors = embed.embed_documents(input_texts)
-            print(len(vectors))
-            # The first 3 coordinates for the first vector
-            print(vectors[0][:3])
-
-        .. code-block:: python
-
-            2
-            [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
-
+        ```python
+        input_texts = ["Document 1...", "Document 2..."]
+        vectors = embed.embed_documents(input_texts)
+        print(len(vectors))
+        # The first 3 coordinates for the first vector
+        print(vectors[0][:3])
+        ```
+        ```python
+        2
+        [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
+        ```
     """
 
     size: int
@@ -75,40 +73,38 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
 
     This embedding model creates embeddings by sampling from a normal distribution
     with a seed based on the hash of the text.
 
-    Do not use this outside of testing, as it is not a real embedding model.
+    !!! danger "Toy model"
+        Do not use this outside of testing, as it is not a real embedding model.
 
     Instantiate:
-        .. code-block:: python
+        ```python
+        from langchain_core.embeddings import DeterministicFakeEmbedding
 
-            from langchain_core.embeddings import DeterministicFakeEmbedding
-
-            embed = DeterministicFakeEmbedding(size=100)
+        embed = DeterministicFakeEmbedding(size=100)
+        ```
 
     Embed single text:
-        .. code-block:: python
-
-            input_text = "The meaning of life is 42"
-            vector = embed.embed_query(input_text)
-            print(vector[:3])
-
-        .. code-block:: python
-
-            [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```python
+        input_text = "The meaning of life is 42"
+        vector = embed.embed_query(input_text)
+        print(vector[:3])
+        ```
+        ```python
+        [-0.700234640213188, -0.581266257710429, -1.1328482266445354]
+        ```
 
     Embed multiple texts:
-        .. code-block:: python
-
-            input_texts = ["Document 1...", "Document 2..."]
-            vectors = embed.embed_documents(input_texts)
-            print(len(vectors))
-            # The first 3 coordinates for the first vector
-            print(vectors[0][:3])
-
-        .. code-block:: python
-
-            2
-            [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
-
+        ```python
+        input_texts = ["Document 1...", "Document 2..."]
+        vectors = embed.embed_documents(input_texts)
+        print(len(vectors))
+        # The first 3 coordinates for the first vector
+        print(vectors[0][:3])
+        ```
+        ```python
+        2
+        [-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
+        ```
     """
 
     size: int
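
Grounded in the two docstrings above, a quick sketch of the practical difference between the fakes: the deterministic variant seeds on the hash of the text, so repeated embeddings of the same string agree, while `FakeEmbeddings` resamples on every call:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding, FakeEmbeddings

det = DeterministicFakeEmbedding(size=8)
rnd = FakeEmbeddings(size=8)

text = "The meaning of life is 42"
# Seeded on the hash of the text: identical across calls.
assert det.embed_query(text) == det.embed_query(text)
# Freshly sampled on each call: almost surely different.
assert rnd.embed_query(text) != rnd.embed_query(text)
```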
langchain_core/example_selectors/length_based.py

@@ -1,7 +1,7 @@
 """Select examples based on length."""
 
 import re
-from typing import Callable
+from collections.abc import Callable
 
 from pydantic import BaseModel, Field, model_validator
 from typing_extensions import Self
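
The lone change in this file is an import modernization: `typing.Callable` has been a deprecated alias of `collections.abc.Callable` since Python 3.9. Both spell the same annotation; a one-line sketch mirroring this file's `get_text_length` field:

```python
from collections.abc import Callable

# Same annotation the old `from typing import Callable` produced:
get_text_length: Callable[[str], int] = len
```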
langchain_core/example_selectors/semantic_similarity.py

@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from abc import ABC
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any
 
 from pydantic import BaseModel, ConfigDict
 
@@ -35,13 +35,13 @@ class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC):
     """VectorStore that contains information about examples."""
     k: int = 4
     """Number of examples to select."""
-    example_keys: Optional[list[str]] = None
+    example_keys: list[str] | None = None
     """Optional keys to filter examples to."""
-    input_keys: Optional[list[str]] = None
+    input_keys: list[str] | None = None
     """Optional keys to filter input to. If provided, the search is based on
     the input variables instead of all variables."""
-    vectorstore_kwargs: Optional[dict[str, Any]] = None
-    """Extra arguments passed to similarity_search function of the vectorstore."""
+    vectorstore_kwargs: dict[str, Any] | None = None
+    """Extra arguments passed to similarity_search function of the `VectorStore`."""
 
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
@@ -49,9 +49,7 @@ class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC):
     )
 
     @staticmethod
-    def _example_to_text(
-        example: dict[str, str], input_keys: Optional[list[str]]
-    ) -> str:
+    def _example_to_text(example: dict[str, str], input_keys: list[str] | None) -> str:
         if input_keys:
             return " ".join(sorted_values({key: example[key] for key in input_keys}))
         return " ".join(sorted_values(example))
@@ -142,10 +140,10 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
         embeddings: Embeddings,
         vectorstore_cls: type[VectorStore],
         k: int = 4,
-        input_keys: Optional[list[str]] = None,
+        input_keys: list[str] | None = None,
         *,
-        example_keys: Optional[list[str]] = None,
-        vectorstore_kwargs: Optional[dict] = None,
+        example_keys: list[str] | None = None,
+        vectorstore_kwargs: dict | None = None,
         **vectorstore_cls_kwargs: Any,
     ) -> SemanticSimilarityExampleSelector:
         """Create k-shot example selector using example list and embeddings.
@@ -156,12 +154,12 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
             examples: List of examples to use in the prompt.
             embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
             vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-            k: Number of examples to select. Default is 4.
+            k: Number of examples to select.
             input_keys: If provided, the search is based on the input variables
                 instead of all variables.
             example_keys: If provided, keys to filter examples to.
             vectorstore_kwargs: Extra arguments passed to similarity_search function
-                of the vectorstore.
+                of the `VectorStore`.
             vectorstore_cls_kwargs: optional kwargs containing url for vector store
 
         Returns:
@@ -186,10 +184,10 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
         embeddings: Embeddings,
         vectorstore_cls: type[VectorStore],
         k: int = 4,
-        input_keys: Optional[list[str]] = None,
+        input_keys: list[str] | None = None,
         *,
-        example_keys: Optional[list[str]] = None,
-        vectorstore_kwargs: Optional[dict] = None,
+        example_keys: list[str] | None = None,
+        vectorstore_kwargs: dict | None = None,
         **vectorstore_cls_kwargs: Any,
     ) -> SemanticSimilarityExampleSelector:
         """Async create k-shot example selector using example list and embeddings.
@@ -200,12 +198,12 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
             examples: List of examples to use in the prompt.
             embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
             vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-            k: Number of examples to select. Default is 4.
+            k: Number of examples to select.
             input_keys: If provided, the search is based on the input variables
                 instead of all variables.
             example_keys: If provided, keys to filter examples to.
             vectorstore_kwargs: Extra arguments passed to similarity_search function
-                of the vectorstore.
+                of the `VectorStore`.
             vectorstore_cls_kwargs: optional kwargs containing url for vector store
 
         Returns:
@@ -273,10 +271,10 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
         embeddings: Embeddings,
         vectorstore_cls: type[VectorStore],
         k: int = 4,
-        input_keys: Optional[list[str]] = None,
+        input_keys: list[str] | None = None,
         fetch_k: int = 20,
-        example_keys: Optional[list[str]] = None,
-        vectorstore_kwargs: Optional[dict] = None,
+        example_keys: list[str] | None = None,
+        vectorstore_kwargs: dict | None = None,
         **vectorstore_cls_kwargs: Any,
     ) -> MaxMarginalRelevanceExampleSelector:
         """Create k-shot example selector using example list and embeddings.
@@ -287,14 +285,13 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
             examples: List of examples to use in the prompt.
             embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
             vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-            k: Number of examples to select. Default is 4.
-            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
+            k: Number of examples to select.
+            fetch_k: Number of `Document` objects to fetch to pass to MMR algorithm.
             input_keys: If provided, the search is based on the input variables
                 instead of all variables.
             example_keys: If provided, keys to filter examples to.
             vectorstore_kwargs: Extra arguments passed to similarity_search function
-                of the vectorstore.
+                of the `VectorStore`.
             vectorstore_cls_kwargs: optional kwargs containing url for vector store
 
         Returns:
@@ -321,10 +318,10 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
         vectorstore_cls: type[VectorStore],
         *,
         k: int = 4,
-        input_keys: Optional[list[str]] = None,
+        input_keys: list[str] | None = None,
         fetch_k: int = 20,
-        example_keys: Optional[list[str]] = None,
-        vectorstore_kwargs: Optional[dict] = None,
+        example_keys: list[str] | None = None,
+        vectorstore_kwargs: dict | None = None,
         **vectorstore_cls_kwargs: Any,
     ) -> MaxMarginalRelevanceExampleSelector:
         """Create k-shot example selector using example list and embeddings.
@@ -335,14 +332,13 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
             examples: List of examples to use in the prompt.
             embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
             vectorstore_cls: A vector store DB interface class, e.g. FAISS.
-            k: Number of examples to select. Default is 4.
-            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
+            k: Number of examples to select.
+            fetch_k: Number of `Document` objects to fetch to pass to MMR algorithm.
             input_keys: If provided, the search is based on the input variables
                 instead of all variables.
             example_keys: If provided, keys to filter examples to.
             vectorstore_kwargs: Extra arguments passed to similarity_search function
-                of the vectorstore.
+                of the `VectorStore`.
             vectorstore_cls_kwargs: optional kwargs containing url for vector store
 
         Returns:
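
A sketch of how the `from_examples` signature above is typically driven, pairing it with `InMemoryVectorStore` and the deterministic fake embeddings that also appear in this diff; the example pairs are invented:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.vectorstores import InMemoryVectorStore

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    embeddings=DeterministicFakeEmbedding(size=16),
    vectorstore_cls=InMemoryVectorStore,
    k=1,
    input_keys=["input"],  # search on the input variable only
)
print(selector.select_examples({"input": "cheerful"}))
```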
langchain_core/exceptions.py

@@ -1,7 +1,7 @@
 """Custom **exceptions** for LangChain."""
 
 from enum import Enum
-from typing import Any, Optional
+from typing import Any
 
 
 class LangChainException(Exception):  # noqa: N818
@@ -16,36 +16,37 @@ class OutputParserException(ValueError, LangChainException):  # noqa: N818
     """Exception that output parsers should raise to signify a parsing error.
 
     This exists to differentiate parsing errors from other code or execution errors
-    that also may arise inside the output parser. OutputParserExceptions will be
-    available to catch and handle in ways to fix the parsing error, while other
-    errors will be raised.
+    that also may arise inside the output parser.
+
+    `OutputParserException` will be available to catch and handle in ways to fix the
+    parsing error, while other errors will be raised.
     """
 
     def __init__(
         self,
         error: Any,
-        observation: Optional[str] = None,
-        llm_output: Optional[str] = None,
+        observation: str | None = None,
+        llm_output: str | None = None,
         send_to_llm: bool = False,  # noqa: FBT001,FBT002
     ):
-        """Create an OutputParserException.
+        """Create an `OutputParserException`.
 
         Args:
             error: The error that's being re-raised or an error message.
-            observation: String explanation of error which can be passed to a
-                model to try and remediate the issue. Defaults to None.
+            observation: String explanation of error which can be passed to a model to
+                try and remediate the issue.
             llm_output: String model output which is error-ing.
-                Defaults to None.
+
             send_to_llm: Whether to send the observation and llm_output back to an Agent
-                after an OutputParserException has been raised.
+                after an `OutputParserException` has been raised.
+
                 This gives the underlying model driving the agent the context that the
                 previous output was improperly structured, in the hopes that it will
                 update the output to the correct format.
-                Defaults to False.
 
         Raises:
-            ValueError: If ``send_to_llm`` is True but either observation or
-                ``llm_output`` are not provided.
+            ValueError: If `send_to_llm` is `True` but either observation or
+                `llm_output` are not provided.
         """
         if isinstance(error, str):
             error = create_message(
@@ -67,11 +68,11 @@ class ErrorCode(Enum):
     """Error codes."""
 
     INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
-    INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"
+    INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"  # Used in JS; not Py (yet)
     MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
-    MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"
-    MODEL_NOT_FOUND = "MODEL_NOT_FOUND"
-    MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"
+    MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"  # Used in JS; not Py (yet)
+    MODEL_NOT_FOUND = "MODEL_NOT_FOUND"  # Used in JS; not Py (yet)
+    MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"  # Used in JS; not Py (yet)
     OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"
 
 
@@ -87,6 +88,6 @@ def create_message(*, message: str, error_code: ErrorCode) -> str:
     """
     return (
        f"{message}\n"
-        "For troubleshooting, visit: https://python.langchain.com/docs/"
-        f"troubleshooting/errors/{error_code.value} "
+        "For troubleshooting, visit: https://docs.langchain.com/oss/python/langchain"
+        f"/errors/{error_code.value} "
     )
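
The reworked `OutputParserException` docstring in practice: a sketch of the raise-and-catch pattern it describes, with the parser and messages invented for illustration:

```python
from langchain_core.exceptions import OutputParserException


def parse_bool(text: str) -> bool:
    normalized = text.strip().lower()
    if normalized not in ("true", "false"):
        raise OutputParserException(
            f"Expected 'true' or 'false', got {text!r}",
            llm_output=text,
            observation="Answer with exactly 'true' or 'false'.",
            send_to_llm=True,  # requires observation and llm_output, per the Raises note
        )
    return normalized == "true"


try:
    parse_bool("maybe")
except OutputParserException as e:
    # Parsing errors are catchable separately from other failures.
    print(e)
```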