langchain-core 0.3.74__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (122)
  1. langchain_core/_api/beta_decorator.py +18 -41
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +2 -3
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/file.py +13 -2
  9. langchain_core/callbacks/manager.py +129 -78
  10. langchain_core/callbacks/usage.py +4 -2
  11. langchain_core/chat_history.py +10 -12
  12. langchain_core/document_loaders/base.py +34 -9
  13. langchain_core/document_loaders/langsmith.py +3 -0
  14. langchain_core/documents/base.py +36 -11
  15. langchain_core/documents/compressor.py +9 -6
  16. langchain_core/documents/transformers.py +4 -2
  17. langchain_core/embeddings/fake.py +8 -5
  18. langchain_core/env.py +2 -3
  19. langchain_core/example_selectors/base.py +12 -0
  20. langchain_core/exceptions.py +7 -0
  21. langchain_core/globals.py +17 -28
  22. langchain_core/indexing/api.py +56 -44
  23. langchain_core/indexing/base.py +7 -10
  24. langchain_core/indexing/in_memory.py +23 -3
  25. langchain_core/language_models/__init__.py +3 -2
  26. langchain_core/language_models/base.py +64 -39
  27. langchain_core/language_models/chat_models.py +130 -42
  28. langchain_core/language_models/fake_chat_models.py +10 -11
  29. langchain_core/language_models/llms.py +49 -17
  30. langchain_core/load/dump.py +5 -7
  31. langchain_core/load/load.py +15 -1
  32. langchain_core/load/serializable.py +38 -43
  33. langchain_core/memory.py +7 -3
  34. langchain_core/messages/ai.py +36 -16
  35. langchain_core/messages/base.py +13 -6
  36. langchain_core/messages/content_blocks.py +23 -2
  37. langchain_core/messages/human.py +2 -6
  38. langchain_core/messages/modifier.py +1 -1
  39. langchain_core/messages/system.py +2 -6
  40. langchain_core/messages/tool.py +36 -16
  41. langchain_core/messages/utils.py +198 -87
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +8 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/transform.py +2 -2
  50. langchain_core/output_parsers/xml.py +23 -22
  51. langchain_core/outputs/chat_generation.py +18 -7
  52. langchain_core/outputs/generation.py +14 -3
  53. langchain_core/outputs/llm_result.py +8 -1
  54. langchain_core/prompt_values.py +10 -4
  55. langchain_core/prompts/base.py +4 -9
  56. langchain_core/prompts/chat.py +88 -61
  57. langchain_core/prompts/dict.py +16 -8
  58. langchain_core/prompts/few_shot.py +9 -11
  59. langchain_core/prompts/few_shot_with_templates.py +5 -1
  60. langchain_core/prompts/image.py +12 -5
  61. langchain_core/prompts/message.py +5 -6
  62. langchain_core/prompts/pipeline.py +13 -8
  63. langchain_core/prompts/prompt.py +22 -8
  64. langchain_core/prompts/string.py +18 -10
  65. langchain_core/prompts/structured.py +7 -2
  66. langchain_core/rate_limiters.py +2 -2
  67. langchain_core/retrievers.py +7 -6
  68. langchain_core/runnables/base.py +842 -567
  69. langchain_core/runnables/branch.py +15 -20
  70. langchain_core/runnables/config.py +11 -17
  71. langchain_core/runnables/configurable.py +34 -19
  72. langchain_core/runnables/fallbacks.py +24 -17
  73. langchain_core/runnables/graph.py +47 -40
  74. langchain_core/runnables/graph_ascii.py +40 -17
  75. langchain_core/runnables/graph_mermaid.py +27 -15
  76. langchain_core/runnables/graph_png.py +27 -31
  77. langchain_core/runnables/history.py +56 -59
  78. langchain_core/runnables/passthrough.py +47 -24
  79. langchain_core/runnables/retry.py +10 -6
  80. langchain_core/runnables/router.py +10 -9
  81. langchain_core/runnables/schema.py +2 -0
  82. langchain_core/runnables/utils.py +51 -89
  83. langchain_core/stores.py +13 -25
  84. langchain_core/structured_query.py +3 -7
  85. langchain_core/sys_info.py +9 -8
  86. langchain_core/tools/base.py +30 -23
  87. langchain_core/tools/convert.py +24 -13
  88. langchain_core/tools/simple.py +35 -3
  89. langchain_core/tools/structured.py +26 -3
  90. langchain_core/tracers/_streaming.py +6 -7
  91. langchain_core/tracers/base.py +2 -2
  92. langchain_core/tracers/context.py +5 -1
  93. langchain_core/tracers/core.py +109 -39
  94. langchain_core/tracers/evaluation.py +22 -26
  95. langchain_core/tracers/event_stream.py +41 -28
  96. langchain_core/tracers/langchain.py +12 -3
  97. langchain_core/tracers/langchain_v1.py +10 -2
  98. langchain_core/tracers/log_stream.py +57 -18
  99. langchain_core/tracers/root_listeners.py +4 -20
  100. langchain_core/tracers/run_collector.py +6 -16
  101. langchain_core/tracers/schemas.py +5 -1
  102. langchain_core/utils/aiter.py +14 -6
  103. langchain_core/utils/env.py +3 -0
  104. langchain_core/utils/function_calling.py +49 -30
  105. langchain_core/utils/interactive_env.py +6 -2
  106. langchain_core/utils/iter.py +11 -3
  107. langchain_core/utils/json.py +5 -2
  108. langchain_core/utils/json_schema.py +15 -5
  109. langchain_core/utils/loading.py +5 -1
  110. langchain_core/utils/mustache.py +24 -15
  111. langchain_core/utils/pydantic.py +32 -4
  112. langchain_core/utils/utils.py +24 -8
  113. langchain_core/vectorstores/base.py +7 -20
  114. langchain_core/vectorstores/in_memory.py +18 -12
  115. langchain_core/vectorstores/utils.py +18 -12
  116. langchain_core/version.py +1 -1
  117. langchain_core-0.3.76.dist-info/METADATA +77 -0
  118. langchain_core-0.3.76.dist-info/RECORD +174 -0
  119. langchain_core-0.3.74.dist-info/METADATA +0 -108
  120. langchain_core-0.3.74.dist-info/RECORD +0 -174
  121. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  122. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0

langchain_core/callbacks/usage.py CHANGED
@@ -12,6 +12,7 @@ from langchain_core.callbacks import BaseCallbackHandler
 from langchain_core.messages import AIMessage
 from langchain_core.messages.ai import UsageMetadata, add_usage
 from langchain_core.outputs import ChatGeneration, LLMResult
+from langchain_core.tracers.context import register_configure_hook


 class UsageMetadataCallbackHandler(BaseCallbackHandler):
@@ -101,6 +102,9 @@ def get_usage_metadata_callback(
         name (str): The name of the context variable. Defaults to
             ``'usage_metadata_callback'``.

+    Yields:
+        The usage metadata callback.
+
     Example:
         .. code-block:: python

@@ -130,8 +134,6 @@ def get_usage_metadata_callback(
     .. versionadded:: 0.3.49

     """
-    from langchain_core.tracers.context import register_configure_hook
-
     usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
         ContextVar(name, default=None)
    )
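
The import moved to module scope is the one that registers the handler's context variable via register_configure_hook. Below is a minimal usage sketch; it assumes get_usage_metadata_callback is re-exported from langchain_core.callbacks (as in recent 0.3.x releases; otherwise import it from langchain_core.callbacks.usage), and the fake chat model is only a stand-in that reports no token usage.

    from langchain_core.callbacks import get_usage_metadata_callback
    from langchain_core.language_models.fake_chat_models import FakeListChatModel

    # Stand-in model; swap in any provider chat model that reports token usage.
    llm = FakeListChatModel(responses=["hello!"])

    with get_usage_metadata_callback() as cb:
        llm.invoke("Hi there")
        # Usage is aggregated per model name across every call in this block.
        print(cb.usage_metadata)  # {} for the fake model; populated for real providers
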
langchain_core/chat_history.py CHANGED
@@ -27,6 +27,7 @@ from langchain_core.messages import (
     HumanMessage,
     get_buffer_string,
 )
+from langchain_core.runnables.config import run_in_executor

 if TYPE_CHECKING:
     from collections.abc import Sequence
@@ -109,17 +110,18 @@ class BaseChatMessageHistory(ABC):

         In general, fetching messages may involve IO to the underlying
         persistence layer.
-        """
-        from langchain_core.runnables.config import run_in_executor

+        Returns:
+            The messages.
+        """
         return await run_in_executor(None, lambda: self.messages)

     def add_user_message(self, message: Union[HumanMessage, str]) -> None:
         """Convenience method for adding a human message string to the store.

-        Please note that this is a convenience method. Code should favor the
-        bulk add_messages interface instead to save on round-trips to the underlying
-        persistence layer.
+        .. note::
+            This is a convenience method. Code should favor the bulk ``add_messages``
+            interface instead to save on round-trips to the persistence layer.

         This method may be deprecated in a future release.

@@ -134,9 +136,9 @@ class BaseChatMessageHistory(ABC):
     def add_ai_message(self, message: Union[AIMessage, str]) -> None:
         """Convenience method for adding an AI message string to the store.

-        Please note that this is a convenience method. Code should favor the bulk
-        add_messages interface instead to save on round-trips to the underlying
-        persistence layer.
+        .. note::
+            This is a convenience method. Code should favor the bulk ``add_messages``
+            interface instead to save on round-trips to the persistence layer.

         This method may be deprecated in a future release.

@@ -187,8 +189,6 @@
         Args:
             messages: A sequence of BaseMessage objects to store.
         """
-        from langchain_core.runnables.config import run_in_executor
-
         await run_in_executor(None, self.add_messages, messages)

     @abstractmethod
@@ -197,8 +197,6 @@

     async def aclear(self) -> None:
         """Async remove all messages from the store."""
-        from langchain_core.runnables.config import run_in_executor
-
         await run_in_executor(None, self.clear)

     def __str__(self) -> str:
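
All of the touched async methods (aget_messages, aadd_messages, aclear) follow the same pattern: they delegate to the sync implementation through run_in_executor, which is now imported once at module level. A small sketch using InMemoryChatMessageHistory from langchain_core.chat_history:

    import asyncio

    from langchain_core.chat_history import InMemoryChatMessageHistory
    from langchain_core.messages import AIMessage, HumanMessage


    async def main() -> None:
        history = InMemoryChatMessageHistory()
        # These async calls are inherited from BaseChatMessageHistory; each one
        # runs the corresponding sync method in a thread executor.
        await history.aadd_messages([HumanMessage("hi"), AIMessage("hello!")])
        print(await history.aget_messages())
        await history.aclear()


    asyncio.run(main())
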
langchain_core/document_loaders/base.py CHANGED
@@ -15,6 +15,13 @@ if TYPE_CHECKING:
     from langchain_core.documents import Document
     from langchain_core.documents.base import Blob

+try:
+    from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+    _HAS_TEXT_SPLITTERS = True
+except ImportError:
+    _HAS_TEXT_SPLITTERS = False
+

 class BaseLoader(ABC): # noqa: B024
     """Interface for Document Loader.
@@ -28,11 +35,19 @@ class BaseLoader(ABC): # noqa: B024
     # Sub-classes should not implement this method directly. Instead, they
     # should implement the lazy load method.
     def load(self) -> list[Document]:
-        """Load data into Document objects."""
+        """Load data into Document objects.
+
+        Returns:
+            the documents.
+        """
         return list(self.lazy_load())

     async def aload(self) -> list[Document]:
-        """Load data into Document objects."""
+        """Load data into Document objects.
+
+        Returns:
+            the documents.
+        """
         return [document async for document in self.alazy_load()]

     def load_and_split(
@@ -44,21 +59,23 @@ class BaseLoader(ABC): # noqa: B024

         Args:
             text_splitter: TextSplitter instance to use for splitting documents.
-              Defaults to RecursiveCharacterTextSplitter.
+                Defaults to RecursiveCharacterTextSplitter.
+
+        Raises:
+            ImportError: If langchain-text-splitters is not installed
+                and no text_splitter is provided.

         Returns:
             List of Documents.
         """
         if text_splitter is None:
-            try:
-                from langchain_text_splitters import RecursiveCharacterTextSplitter
-            except ImportError as e:
+            if not _HAS_TEXT_SPLITTERS:
                 msg = (
                     "Unable to import from langchain_text_splitters. Please specify "
                     "text_splitter or install langchain_text_splitters with "
                     "`pip install -U langchain-text-splitters`."
                 )
-                raise ImportError(msg) from e
+                raise ImportError(msg)

             text_splitter_: TextSplitter = RecursiveCharacterTextSplitter()
         else:
@@ -69,14 +86,22 @@ class BaseLoader(ABC): # noqa: B024
     # Attention: This method will be upgraded into an abstractmethod once it's
     # implemented in all the existing subclasses.
     def lazy_load(self) -> Iterator[Document]:
-        """A lazy loader for Documents."""
+        """A lazy loader for Documents.
+
+        Yields:
+            the documents.
+        """
         if type(self).load != BaseLoader.load:
             return iter(self.load())
         msg = f"{self.__class__.__name__} does not implement lazy_load()"
         raise NotImplementedError(msg)

     async def alazy_load(self) -> AsyncIterator[Document]:
-        """A lazy loader for Documents."""
+        """A lazy loader for Documents.
+
+        Yields:
+            the documents.
+        """
         iterator = await run_in_executor(None, self.lazy_load)
         done = object()
         while True:
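
For orientation, lazy_load is the only method a subclass has to provide; load, aload, alazy_load, and load_and_split are all derived from it. A hypothetical loader sketch (the class name and data are made up for illustration):

    from collections.abc import Iterator

    from langchain_core.document_loaders import BaseLoader
    from langchain_core.documents import Document


    class StringListLoader(BaseLoader):
        """Hypothetical loader that wraps a list of strings."""

        def __init__(self, texts: list[str]) -> None:
            self.texts = texts

        def lazy_load(self) -> Iterator[Document]:
            # The only method subclasses must implement; load() just materializes it.
            for i, text in enumerate(self.texts):
                yield Document(page_content=text, metadata={"index": i})


    loader = StringListLoader(["first document", "second document"])
    print(loader.load())
    # load_and_split() additionally needs langchain-text-splitters installed
    # (or an explicit text_splitter), per the _HAS_TEXT_SPLITTERS check above.
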
langchain_core/document_loaders/langsmith.py CHANGED
@@ -84,6 +84,9 @@ class LangSmithLoader(BaseLoader):
             client: LangSmith Client. If not provided will be initialized from below args.
             client_kwargs: Keyword args to pass to LangSmith client init. Should only be
                 specified if ``client`` isn't.
+
+        Raises:
+            ValueError: If both ``client`` and ``client_kwargs`` are provided.
         """  # noqa: E501
         if client and client_kwargs:
             raise ValueError

langchain_core/documents/base.py CHANGED
@@ -82,7 +82,7 @@ class Blob(BaseMedia):
             blob = Blob.from_data(
                 data="Hello, world!",
                 mime_type="text/plain",
-                metadata={"source": "https://example.com"}
+                metadata={"source": "https://example.com"},
             )

     Example: Load the blob from a file
@@ -145,7 +145,14 @@ class Blob(BaseMedia):
         return values

     def as_string(self) -> str:
-        """Read data as a string."""
+        """Read data as a string.
+
+        Raises:
+            ValueError: If the blob cannot be represented as a string.
+
+        Returns:
+            The data as a string.
+        """
         if self.data is None and self.path:
             return Path(self.path).read_text(encoding=self.encoding)
         if isinstance(self.data, bytes):
@@ -156,7 +163,14 @@ class Blob(BaseMedia):
         raise ValueError(msg)

     def as_bytes(self) -> bytes:
-        """Read data as bytes."""
+        """Read data as bytes.
+
+        Raises:
+            ValueError: If the blob cannot be represented as bytes.
+
+        Returns:
+            The data as bytes.
+        """
         if isinstance(self.data, bytes):
             return self.data
         if isinstance(self.data, str):
@@ -168,7 +182,14 @@ class Blob(BaseMedia):

     @contextlib.contextmanager
     def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
-        """Read data as a byte stream."""
+        """Read data as a byte stream.
+
+        Raises:
+            NotImplementedError: If the blob cannot be represented as a byte stream.
+
+        Yields:
+            The data as a byte stream.
+        """
         if isinstance(self.data, bytes):
             yield BytesIO(self.data)
         elif self.data is None and self.path:
@@ -246,7 +267,7 @@ class Blob(BaseMedia):
         )

     def __repr__(self) -> str:
-        """Define the blob representation."""
+        """Return the blob representation."""
         str_repr = f"Blob {id(self)}"
         if self.source:
             str_repr += f" {self.source}"
@@ -263,8 +284,7 @@ class Document(BaseMedia):
             from langchain_core.documents import Document

             document = Document(
-                page_content="Hello, world!",
-                metadata={"source": "https://example.com"}
+                page_content="Hello, world!", metadata={"source": "https://example.com"}
             )

     """
@@ -277,23 +297,28 @@ class Document(BaseMedia):
         """Pass page_content in as positional or named arg."""
         # my-py is complaining that page_content is not defined on the base class.
         # Here, we're relying on pydantic base class to handle the validation.
-        super().__init__(page_content=page_content, **kwargs)  # type: ignore[call-arg]
+        super().__init__(page_content=page_content, **kwargs)

     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether this class is serializable."""
+        """Return True as this class is serializable."""
         return True

     @classmethod
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.

-        Default namespace is ["langchain", "schema", "document"].
+        Returns:
+            ["langchain", "schema", "document"]
         """
         return ["langchain", "schema", "document"]

     def __str__(self) -> str:
-        """Override __str__ to restrict it to page_content and metadata."""
+        """Override __str__ to restrict it to page_content and metadata.
+
+        Returns:
+            A string representation of the Document.
+        """
         # The format matches pydantic format for __str__.
         #
         # The purpose of this change is to make sure that user code that
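
The new Raises/Returns sections describe behavior that is quickest to see in a short sketch; Blob.from_data and the accessors below are the same public API referenced in the docstrings being edited.

    from langchain_core.documents.base import Blob

    blob = Blob.from_data(
        data="Hello, world!",
        mime_type="text/plain",
        metadata={"source": "https://example.com"},
    )
    print(blob.as_string())  # "Hello, world!"
    print(blob.as_bytes())   # b"Hello, world!" (str data is encoded using blob.encoding)
    with blob.as_bytes_io() as stream:
        print(stream.read())
    # as_string()/as_bytes() raise ValueError when the blob cannot be
    # represented in the requested form, per the docstrings above.
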
langchain_core/documents/compressor.py CHANGED
@@ -19,17 +19,18 @@ if TYPE_CHECKING:
 class BaseDocumentCompressor(BaseModel, ABC):
     """Base class for document compressors.

-    This abstraction is primarily used for
-    post-processing of retrieved documents.
+    This abstraction is primarily used for post-processing of retrieved documents.

     Documents matching a given query are first retrieved.
+
     Then the list of documents can be further processed.

-    For example, one could re-rank the retrieved documents
-    using an LLM.
+    For example, one could re-rank the retrieved documents using an LLM.
+
+    .. note::
+        Users should favor using a RunnableLambda instead of sub-classing from this
+        interface.

-    **Note** users should favor using a RunnableLambda
-    instead of sub-classing from this interface.
     """

     @abstractmethod
@@ -48,6 +49,7 @@ class BaseDocumentCompressor(BaseModel, ABC):

         Returns:
             The compressed documents.
+
         """

     async def acompress_documents(
@@ -65,6 +67,7 @@ class BaseDocumentCompressor(BaseModel, ABC):

         Returns:
             The compressed documents.
+
         """
         return await run_in_executor(
             None, self.compress_documents, documents, query, callbacks
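
The new note recommends a RunnableLambda over subclassing. A minimal sketch of that recommendation, with a made-up filtering function as the post-processing step:

    from langchain_core.documents import Document
    from langchain_core.runnables import RunnableLambda


    def drop_long_documents(docs: list[Document]) -> list[Document]:
        """Hypothetical post-processing step: keep only short documents."""
        return [doc for doc in docs if len(doc.page_content) < 500]


    post_processor = RunnableLambda(drop_long_documents)
    docs = [Document(page_content="short"), Document(page_content="x" * 1000)]
    print(post_processor.invoke(docs))  # only the short document survives
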
langchain_core/documents/transformers.py CHANGED
@@ -38,7 +38,9 @@ class BaseDocumentTransformer(ABC):
                     self.embeddings, stateful_documents
                 )
                 included_idxs = _filter_similar_embeddings(
-                    embedded_documents, self.similarity_fn, self.similarity_threshold
+                    embedded_documents,
+                    self.similarity_fn,
+                    self.similarity_threshold,
                 )
                 return [stateful_documents[i] for i in sorted(included_idxs)]

@@ -47,7 +49,7 @@
             ) -> Sequence[Document]:
                 raise NotImplementedError

-    """  # noqa: E501
+    """

     @abstractmethod
     def transform_documents(

langchain_core/embeddings/fake.py CHANGED
@@ -1,6 +1,7 @@
 """Module contains a few fake embedding models for testing purposes."""

 # Please do not add additional fake embedding model implementations here.
+import contextlib
 import hashlib

 from pydantic import BaseModel
@@ -8,6 +9,9 @@ from typing_extensions import override

 from langchain_core.embeddings import Embeddings

+with contextlib.suppress(ImportError):
+    import numpy as np
+

 class FakeEmbeddings(Embeddings, BaseModel):
     """Fake embedding model for unit testing purposes.
@@ -20,6 +24,7 @@ class FakeEmbeddings(Embeddings, BaseModel):
         .. code-block:: python

             from langchain_core.embeddings import FakeEmbeddings
+
             embed = FakeEmbeddings(size=100)

     Embed single text:
@@ -53,8 +58,6 @@ class FakeEmbeddings(Embeddings, BaseModel):
     """The size of the embedding vector."""

     def _get_embedding(self) -> list[float]:
-        import numpy as np
-
         return list(np.random.default_rng().normal(size=self.size))

     @override
@@ -78,6 +81,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
         .. code-block:: python

             from langchain_core.embeddings import DeterministicFakeEmbedding
+
             embed = DeterministicFakeEmbedding(size=100)

     Embed single text:
@@ -111,13 +115,12 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
     """The size of the embedding vector."""

     def _get_embedding(self, seed: int) -> list[float]:
-        import numpy as np
-
         # set the seed for the random generator
         rng = np.random.default_rng(seed)
         return list(rng.normal(size=self.size))

-    def _get_seed(self, text: str) -> int:
+    @staticmethod
+    def _get_seed(text: str) -> int:
         """Get a seed for the random generator, using the hash of the text."""
         return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
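
With numpy imported once at module level (guarded by contextlib.suppress(ImportError)), both fake embedding classes use it directly. A quick sketch of the deterministic variant, assuming numpy is installed:

    from langchain_core.embeddings import DeterministicFakeEmbedding

    embed = DeterministicFakeEmbedding(size=8)

    # The seed comes from a SHA-256 hash of the text (see _get_seed above),
    # so the same input always yields the same vector.
    assert embed.embed_query("hello") == embed.embed_query("hello")
    print(embed.embed_documents(["hello", "world"]))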
 
langchain_core/env.py CHANGED
@@ -3,6 +3,8 @@
 import platform
 from functools import lru_cache

+from langchain_core import __version__
+

 @lru_cache(maxsize=1)
 def get_runtime_environment() -> dict:
@@ -11,9 +13,6 @@ def get_runtime_environment() -> dict:
     Returns:
         A dictionary with information about the runtime environment.
     """
-    # Lazy import to avoid circular imports
-    from langchain_core import __version__
-
     return {
         "library_version": __version__,
         "library": "langchain-core",

langchain_core/example_selectors/base.py CHANGED
@@ -16,6 +16,9 @@ class BaseExampleSelector(ABC):
         Args:
             example: A dictionary with keys as input variables
                 and values as their values.
+
+        Returns:
+            Any return value.
         """

     async def aadd_example(self, example: dict[str, str]) -> Any:
@@ -24,6 +27,9 @@
         Args:
             example: A dictionary with keys as input variables
                 and values as their values.
+
+        Returns:
+            Any return value.
         """
         return await run_in_executor(None, self.add_example, example)

@@ -34,6 +40,9 @@ class BaseExampleSelector(ABC):
         Args:
             input_variables: A dictionary with keys as input variables
                 and values as their values.
+
+        Returns:
+            A list of examples.
         """

     async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]:
@@ -42,5 +51,8 @@
         Args:
             input_variables: A dictionary with keys as input variables
                 and values as their values.
+
+        Returns:
+            A list of examples.
         """
         return await run_in_executor(None, self.select_examples, input_variables)
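
The async wrappers documented above (aadd_example, aselect_examples) again delegate to the sync methods via run_in_executor, so a subclass only needs the two sync methods. A hypothetical selector sketch:

    from langchain_core.example_selectors import BaseExampleSelector


    class FirstTwoExampleSelector(BaseExampleSelector):
        """Hypothetical selector: ignores the input, returns the first two examples."""

        def __init__(self) -> None:
            self.examples: list[dict[str, str]] = []

        def add_example(self, example: dict[str, str]) -> None:
            self.examples.append(example)

        def select_examples(self, input_variables: dict[str, str]) -> list[dict]:
            return self.examples[:2]


    selector = FirstTwoExampleSelector()
    selector.add_example({"input": "2 + 2", "output": "4"})
    print(selector.select_examples({"input": "anything"}))
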
langchain_core/exceptions.py CHANGED
@@ -42,6 +42,10 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
                 previous output was improperly structured, in the hopes that it will
                 update the output to the correct format.
                 Defaults to False.
+
+        Raises:
+            ValueError: If ``send_to_llm`` is True but either observation or
+                ``llm_output`` are not provided.
         """
         if isinstance(error, str):
             error = create_message(
@@ -77,6 +81,9 @@ def create_message(*, message: str, error_code: ErrorCode) -> str:
     Args:
         message: The message to display.
         error_code: The error code to display.
+
+    Returns:
+        The full message with the troubleshooting link.
     """
     return (
         f"{message}\n"
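
The new Raises note reflects the existing constructor check: send_to_llm=True is only valid when both the observation and the raw LLM output are supplied so they can be handed back to the model. A sketch with made-up message text:

    from langchain_core.exceptions import OutputParserException

    try:
        raise OutputParserException(
            "Could not parse the output as JSON.",
            observation="Respond with a single valid JSON object.",
            llm_output='{"answer":',
            send_to_llm=True,  # requires both observation and llm_output
        )
    except OutputParserException as exc:
        print(exc)
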
langchain_core/globals.py CHANGED
@@ -6,6 +6,13 @@ from typing import TYPE_CHECKING, Optional
 if TYPE_CHECKING:
     from langchain_core.caches import BaseCache

+try:
+    import langchain  # type: ignore[import-not-found]
+
+    _HAS_LANGCHAIN = True
+except ImportError:
+    _HAS_LANGCHAIN = False
+

 # DO NOT USE THESE VALUES DIRECTLY!
 # Use them only via `get_<X>()` and `set_<X>()` below,
@@ -22,9 +29,7 @@ def set_verbose(value: bool) -> None: # noqa: FBT001
     Args:
         value: The new value for the `verbose` global setting.
     """
-    try:
-        import langchain  # type: ignore[import-not-found]
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -43,8 +48,6 @@ def set_verbose(value: bool) -> None: # noqa: FBT001
         # Remove it once `langchain.verbose` is no longer supported, and once all
         # users have migrated to using `set_verbose()` here.
         langchain.verbose = value
-    except ImportError:
-        pass

     global _verbose  # noqa: PLW0603
     _verbose = value
@@ -56,9 +59,7 @@ def get_verbose() -> bool:
     Returns:
         The value of the `verbose` global setting.
     """
-    try:
-        import langchain
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -83,7 +84,7 @@ def get_verbose() -> bool:
         # deprecation warnings directing them to use `set_verbose()` when they
         # import `langchain.verbose`.
         old_verbose = langchain.verbose
-    except ImportError:
+    else:
         old_verbose = False

     return _verbose or old_verbose
@@ -95,9 +96,7 @@ def set_debug(value: bool) -> None: # noqa: FBT001
     Args:
         value: The new value for the `debug` global setting.
     """
-    try:
-        import langchain
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -114,8 +113,6 @@ def set_debug(value: bool) -> None: # noqa: FBT001
         # Remove it once `langchain.debug` is no longer supported, and once all
         # users have migrated to using `set_debug()` here.
         langchain.debug = value
-    except ImportError:
-        pass

     global _debug  # noqa: PLW0603
     _debug = value
@@ -127,9 +124,7 @@ def get_debug() -> bool:
     Returns:
         The value of the `debug` global setting.
     """
-    try:
-        import langchain
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -151,7 +146,7 @@ def get_debug() -> bool:
         # to using `set_debug()` yet. Those users are getting deprecation warnings
         # directing them to use `set_debug()` when they import `langchain.debug`.
         old_debug = langchain.debug
-    except ImportError:
+    else:
         old_debug = False

     return _debug or old_debug
@@ -163,9 +158,7 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
     Args:
         value: The new LLM cache to use. If `None`, the LLM cache is disabled.
     """
-    try:
-        import langchain
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -184,22 +177,18 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
         # Remove it once `langchain.llm_cache` is no longer supported, and
         # once all users have migrated to using `set_llm_cache()` here.
         langchain.llm_cache = value
-    except ImportError:
-        pass

     global _llm_cache  # noqa: PLW0603
     _llm_cache = value


-def get_llm_cache() -> "BaseCache":
+def get_llm_cache() -> Optional["BaseCache"]:
     """Get the value of the `llm_cache` global setting.

     Returns:
         The value of the `llm_cache` global setting.
     """
-    try:
-        import langchain
-
+    if _HAS_LANGCHAIN:
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
         # warnings.
@@ -225,7 +214,7 @@ def get_llm_cache() -> "BaseCache":
         # Those users are getting deprecation warnings directing them
         # to use `set_llm_cache()` when they import `langchain.llm_cache`.
         old_llm_cache = langchain.llm_cache
-    except ImportError:
+    else:
         old_llm_cache = None

     return _llm_cache or old_llm_cache
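
The public entry points are unchanged; the refactor only moves the optional langchain import to module scope behind the _HAS_LANGCHAIN flag. Typical usage of the getters and setters, using InMemoryCache from langchain_core.caches as the example cache:

    from langchain_core.caches import InMemoryCache
    from langchain_core.globals import get_debug, get_llm_cache, set_debug, set_llm_cache

    set_debug(True)
    print(get_debug())  # True

    set_llm_cache(InMemoryCache())
    print(get_llm_cache())  # the InMemoryCache instance; None if no cache was ever set

    # Reset the globals.
    set_debug(False)
    set_llm_cache(None)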