langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; see the package registry page for more details.

Files changed (131)
  1. langchain_core/_api/__init__.py +3 -3
  2. langchain_core/_api/beta_decorator.py +6 -6
  3. langchain_core/_api/deprecation.py +21 -29
  4. langchain_core/_api/path.py +3 -6
  5. langchain_core/_import_utils.py +2 -3
  6. langchain_core/agents.py +10 -11
  7. langchain_core/caches.py +7 -7
  8. langchain_core/callbacks/base.py +91 -91
  9. langchain_core/callbacks/file.py +11 -11
  10. langchain_core/callbacks/manager.py +86 -89
  11. langchain_core/callbacks/stdout.py +8 -8
  12. langchain_core/callbacks/usage.py +4 -4
  13. langchain_core/chat_history.py +5 -5
  14. langchain_core/document_loaders/base.py +2 -2
  15. langchain_core/document_loaders/langsmith.py +15 -15
  16. langchain_core/documents/base.py +16 -16
  17. langchain_core/documents/compressor.py +4 -4
  18. langchain_core/example_selectors/length_based.py +1 -1
  19. langchain_core/example_selectors/semantic_similarity.py +17 -19
  20. langchain_core/exceptions.py +3 -3
  21. langchain_core/globals.py +3 -151
  22. langchain_core/indexing/api.py +44 -43
  23. langchain_core/indexing/base.py +30 -30
  24. langchain_core/indexing/in_memory.py +3 -3
  25. langchain_core/language_models/_utils.py +5 -7
  26. langchain_core/language_models/base.py +18 -132
  27. langchain_core/language_models/chat_models.py +118 -227
  28. langchain_core/language_models/fake.py +11 -11
  29. langchain_core/language_models/fake_chat_models.py +35 -29
  30. langchain_core/language_models/llms.py +91 -201
  31. langchain_core/load/dump.py +1 -1
  32. langchain_core/load/load.py +11 -12
  33. langchain_core/load/mapping.py +2 -4
  34. langchain_core/load/serializable.py +2 -4
  35. langchain_core/messages/ai.py +17 -20
  36. langchain_core/messages/base.py +23 -25
  37. langchain_core/messages/block_translators/__init__.py +2 -5
  38. langchain_core/messages/block_translators/anthropic.py +3 -3
  39. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  40. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  41. langchain_core/messages/block_translators/openai.py +6 -6
  42. langchain_core/messages/content.py +120 -124
  43. langchain_core/messages/human.py +7 -7
  44. langchain_core/messages/system.py +7 -7
  45. langchain_core/messages/tool.py +24 -24
  46. langchain_core/messages/utils.py +67 -79
  47. langchain_core/output_parsers/base.py +12 -14
  48. langchain_core/output_parsers/json.py +4 -4
  49. langchain_core/output_parsers/list.py +3 -5
  50. langchain_core/output_parsers/openai_functions.py +3 -3
  51. langchain_core/output_parsers/openai_tools.py +3 -3
  52. langchain_core/output_parsers/pydantic.py +2 -2
  53. langchain_core/output_parsers/transform.py +13 -15
  54. langchain_core/output_parsers/xml.py +7 -9
  55. langchain_core/outputs/chat_generation.py +4 -4
  56. langchain_core/outputs/chat_result.py +1 -3
  57. langchain_core/outputs/generation.py +2 -2
  58. langchain_core/outputs/llm_result.py +5 -5
  59. langchain_core/prompts/__init__.py +1 -5
  60. langchain_core/prompts/base.py +10 -15
  61. langchain_core/prompts/chat.py +31 -82
  62. langchain_core/prompts/dict.py +2 -2
  63. langchain_core/prompts/few_shot.py +5 -5
  64. langchain_core/prompts/few_shot_with_templates.py +4 -4
  65. langchain_core/prompts/loading.py +3 -5
  66. langchain_core/prompts/prompt.py +4 -16
  67. langchain_core/prompts/string.py +2 -1
  68. langchain_core/prompts/structured.py +16 -23
  69. langchain_core/rate_limiters.py +3 -4
  70. langchain_core/retrievers.py +14 -14
  71. langchain_core/runnables/base.py +928 -1042
  72. langchain_core/runnables/branch.py +36 -40
  73. langchain_core/runnables/config.py +27 -35
  74. langchain_core/runnables/configurable.py +108 -124
  75. langchain_core/runnables/fallbacks.py +76 -72
  76. langchain_core/runnables/graph.py +39 -45
  77. langchain_core/runnables/graph_ascii.py +9 -11
  78. langchain_core/runnables/graph_mermaid.py +18 -19
  79. langchain_core/runnables/graph_png.py +8 -9
  80. langchain_core/runnables/history.py +114 -127
  81. langchain_core/runnables/passthrough.py +113 -139
  82. langchain_core/runnables/retry.py +43 -48
  83. langchain_core/runnables/router.py +23 -28
  84. langchain_core/runnables/schema.py +42 -44
  85. langchain_core/runnables/utils.py +28 -31
  86. langchain_core/stores.py +9 -13
  87. langchain_core/structured_query.py +8 -8
  88. langchain_core/tools/base.py +62 -115
  89. langchain_core/tools/convert.py +31 -35
  90. langchain_core/tools/render.py +1 -1
  91. langchain_core/tools/retriever.py +4 -4
  92. langchain_core/tools/simple.py +13 -17
  93. langchain_core/tools/structured.py +12 -15
  94. langchain_core/tracers/base.py +62 -64
  95. langchain_core/tracers/context.py +17 -35
  96. langchain_core/tracers/core.py +49 -53
  97. langchain_core/tracers/evaluation.py +11 -11
  98. langchain_core/tracers/event_stream.py +58 -60
  99. langchain_core/tracers/langchain.py +13 -13
  100. langchain_core/tracers/log_stream.py +22 -24
  101. langchain_core/tracers/root_listeners.py +14 -14
  102. langchain_core/tracers/run_collector.py +2 -4
  103. langchain_core/tracers/schemas.py +8 -8
  104. langchain_core/tracers/stdout.py +2 -1
  105. langchain_core/utils/__init__.py +0 -3
  106. langchain_core/utils/_merge.py +2 -2
  107. langchain_core/utils/aiter.py +24 -28
  108. langchain_core/utils/env.py +4 -4
  109. langchain_core/utils/function_calling.py +31 -41
  110. langchain_core/utils/html.py +3 -4
  111. langchain_core/utils/input.py +3 -3
  112. langchain_core/utils/iter.py +15 -19
  113. langchain_core/utils/json.py +3 -2
  114. langchain_core/utils/json_schema.py +6 -6
  115. langchain_core/utils/mustache.py +3 -5
  116. langchain_core/utils/pydantic.py +16 -18
  117. langchain_core/utils/usage.py +1 -1
  118. langchain_core/utils/utils.py +29 -29
  119. langchain_core/vectorstores/base.py +18 -21
  120. langchain_core/vectorstores/in_memory.py +14 -87
  121. langchain_core/vectorstores/utils.py +2 -2
  122. langchain_core/version.py +1 -1
  123. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/METADATA +10 -21
  124. langchain_core-1.0.0a8.dist-info/RECORD +176 -0
  125. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/WHEEL +1 -1
  126. langchain_core/messages/block_translators/ollama.py +0 -47
  127. langchain_core/prompts/pipeline.py +0 -138
  128. langchain_core/tracers/langchain_v1.py +0 -31
  129. langchain_core/utils/loading.py +0 -35
  130. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  131. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
@@ -3,8 +3,8 @@
3
3
  import datetime
4
4
  import json
5
5
  import uuid
6
- from collections.abc import Iterator, Sequence
7
- from typing import Any, Callable, Optional, Union
6
+ from collections.abc import Callable, Iterator, Sequence
7
+ from typing import Any
8
8
 
9
9
  from langsmith import Client as LangSmithClient
10
10
  from typing_extensions import override
@@ -20,7 +20,7 @@ class LangSmithLoader(BaseLoader):
20
20
  into the Document metadata. This allows you to easily create few-shot example
21
21
  retrievers from the loaded documents.
22
22
 
23
- .. dropdown:: Lazy load
23
+ ??? note "Lazy load"
24
24
 
25
25
  .. code-block:: python
26
26
 
@@ -35,26 +35,26 @@ class LangSmithLoader(BaseLoader):
35
35
 
36
36
  # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
37
37
 
38
- .. versionadded:: 0.2.34
38
+ !!! version-added "Added in version 0.2.34"
39
39
 
40
40
  """ # noqa: E501
41
41
 
42
42
  def __init__(
43
43
  self,
44
44
  *,
45
- dataset_id: Optional[Union[uuid.UUID, str]] = None,
46
- dataset_name: Optional[str] = None,
47
- example_ids: Optional[Sequence[Union[uuid.UUID, str]]] = None,
48
- as_of: Optional[Union[datetime.datetime, str]] = None,
49
- splits: Optional[Sequence[str]] = None,
45
+ dataset_id: uuid.UUID | str | None = None,
46
+ dataset_name: str | None = None,
47
+ example_ids: Sequence[uuid.UUID | str] | None = None,
48
+ as_of: datetime.datetime | str | None = None,
49
+ splits: Sequence[str] | None = None,
50
50
  inline_s3_urls: bool = True,
51
51
  offset: int = 0,
52
- limit: Optional[int] = None,
53
- metadata: Optional[dict] = None,
54
- filter: Optional[str] = None, # noqa: A002
52
+ limit: int | None = None,
53
+ metadata: dict | None = None,
54
+ filter: str | None = None, # noqa: A002
55
55
  content_key: str = "",
56
- format_content: Optional[Callable[..., str]] = None,
57
- client: Optional[LangSmithClient] = None,
56
+ format_content: Callable[..., str] | None = None,
57
+ client: LangSmithClient | None = None,
58
58
  **client_kwargs: Any,
59
59
  ) -> None:
60
60
  """Create a LangSmith loader.
@@ -129,7 +129,7 @@ class LangSmithLoader(BaseLoader):
129
129
  yield Document(content_str, metadata=metadata)
130
130
 
131
131
 
132
- def _stringify(x: Union[str, dict]) -> str:
132
+ def _stringify(x: str | dict) -> str:
133
133
  if isinstance(x, str):
134
134
  return x
135
135
  try:
@@ -6,7 +6,7 @@ import contextlib
6
6
  import mimetypes
7
7
  from io import BufferedReader, BytesIO
8
8
  from pathlib import Path, PurePath
9
- from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
9
+ from typing import TYPE_CHECKING, Any, Literal, cast
10
10
 
11
11
  from pydantic import ConfigDict, Field, model_validator
12
12
 
@@ -15,7 +15,7 @@ from langchain_core.load.serializable import Serializable
15
15
  if TYPE_CHECKING:
16
16
  from collections.abc import Generator
17
17
 
18
- PathLike = Union[str, PurePath]
18
+ PathLike = str | PurePath
19
19
 
20
20
 
21
21
  class BaseMedia(Serializable):
@@ -33,13 +33,13 @@ class BaseMedia(Serializable):
33
33
  # The ID field is optional at the moment.
34
34
  # It will likely become required in a future major release after
35
35
  # it has been adopted by enough vectorstore implementations.
36
- id: Optional[str] = Field(default=None, coerce_numbers_to_str=True)
36
+ id: str | None = Field(default=None, coerce_numbers_to_str=True)
37
37
  """An optional identifier for the document.
38
38
 
39
39
  Ideally this should be unique across the document collection and formatted
40
40
  as a UUID, but this will not be enforced.
41
41
 
42
- .. versionadded:: 0.2.11
42
+ !!! version-added "Added in version 0.2.11"
43
43
  """
44
44
 
45
45
  metadata: dict = Field(default_factory=dict)
@@ -105,16 +105,16 @@ class Blob(BaseMedia):
105
105
 
106
106
  """
107
107
 
108
- data: Union[bytes, str, None] = None
108
+ data: bytes | str | None = None
109
109
  """Raw data associated with the blob."""
110
- mimetype: Optional[str] = None
110
+ mimetype: str | None = None
111
111
  """MimeType not to be confused with a file extension."""
112
112
  encoding: str = "utf-8"
113
113
  """Encoding to use if decoding the bytes into a string.
114
114
 
115
115
  Use utf-8 as default encoding, if decoding to string.
116
116
  """
117
- path: Optional[PathLike] = None
117
+ path: PathLike | None = None
118
118
  """Location where the original content was found."""
119
119
 
120
120
  model_config = ConfigDict(
@@ -123,7 +123,7 @@ class Blob(BaseMedia):
123
123
  )
124
124
 
125
125
  @property
126
- def source(self) -> Optional[str]:
126
+ def source(self) -> str | None:
127
127
  """The source location of the blob as string if known otherwise none.
128
128
 
129
129
  If a path is associated with the blob, it will default to the path location.
@@ -132,7 +132,7 @@ class Blob(BaseMedia):
132
132
  case that value will be used instead.
133
133
  """
134
134
  if self.metadata and "source" in self.metadata:
135
- return cast("Optional[str]", self.metadata["source"])
135
+ return cast("str | None", self.metadata["source"])
136
136
  return str(self.path) if self.path else None
137
137
 
138
138
  @model_validator(mode="before")
@@ -181,7 +181,7 @@ class Blob(BaseMedia):
181
181
  raise ValueError(msg)
182
182
 
183
183
  @contextlib.contextmanager
184
- def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
184
+ def as_bytes_io(self) -> Generator[BytesIO | BufferedReader, None, None]:
185
185
  """Read data as a byte stream.
186
186
 
187
187
  Raises:
@@ -205,9 +205,9 @@ class Blob(BaseMedia):
205
205
  path: PathLike,
206
206
  *,
207
207
  encoding: str = "utf-8",
208
- mime_type: Optional[str] = None,
208
+ mime_type: str | None = None,
209
209
  guess_type: bool = True,
210
- metadata: Optional[dict] = None,
210
+ metadata: dict | None = None,
211
211
  ) -> Blob:
212
212
  """Load the blob from a path like object.
213
213
 
@@ -239,12 +239,12 @@ class Blob(BaseMedia):
239
239
  @classmethod
240
240
  def from_data(
241
241
  cls,
242
- data: Union[str, bytes],
242
+ data: str | bytes,
243
243
  *,
244
244
  encoding: str = "utf-8",
245
- mime_type: Optional[str] = None,
246
- path: Optional[str] = None,
247
- metadata: Optional[dict] = None,
245
+ mime_type: str | None = None,
246
+ path: str | None = None,
247
+ metadata: dict | None = None,
248
248
  ) -> Blob:
249
249
  """Initialize the blob from in-memory data.
250
250
 
@@ -3,7 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from abc import ABC, abstractmethod
6
- from typing import TYPE_CHECKING, Optional
6
+ from typing import TYPE_CHECKING
7
7
 
8
8
  from pydantic import BaseModel
9
9
 
@@ -27,7 +27,7 @@ class BaseDocumentCompressor(BaseModel, ABC):
27
27
 
28
28
  For example, one could re-rank the retrieved documents using an LLM.
29
29
 
30
- .. note::
30
+ !!! note
31
31
  Users should favor using a RunnableLambda instead of sub-classing from this
32
32
  interface.
33
33
 
@@ -38,7 +38,7 @@ class BaseDocumentCompressor(BaseModel, ABC):
38
38
  self,
39
39
  documents: Sequence[Document],
40
40
  query: str,
41
- callbacks: Optional[Callbacks] = None,
41
+ callbacks: Callbacks | None = None,
42
42
  ) -> Sequence[Document]:
43
43
  """Compress retrieved documents given the query context.
44
44
 
@@ -56,7 +56,7 @@ class BaseDocumentCompressor(BaseModel, ABC):
56
56
  self,
57
57
  documents: Sequence[Document],
58
58
  query: str,
59
- callbacks: Optional[Callbacks] = None,
59
+ callbacks: Callbacks | None = None,
60
60
  ) -> Sequence[Document]:
61
61
  """Async compress retrieved documents given the query context.
62
62
 
@@ -1,7 +1,7 @@
1
1
  """Select examples based on length."""
2
2
 
3
3
  import re
4
- from typing import Callable
4
+ from collections.abc import Callable
5
5
 
6
6
  from pydantic import BaseModel, Field, model_validator
7
7
  from typing_extensions import Self
@@ -3,7 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from abc import ABC
6
- from typing import TYPE_CHECKING, Any, Optional
6
+ from typing import TYPE_CHECKING, Any
7
7
 
8
8
  from pydantic import BaseModel, ConfigDict
9
9
 
@@ -35,12 +35,12 @@ class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC):
35
35
  """VectorStore that contains information about examples."""
36
36
  k: int = 4
37
37
  """Number of examples to select."""
38
- example_keys: Optional[list[str]] = None
38
+ example_keys: list[str] | None = None
39
39
  """Optional keys to filter examples to."""
40
- input_keys: Optional[list[str]] = None
40
+ input_keys: list[str] | None = None
41
41
  """Optional keys to filter input to. If provided, the search is based on
42
42
  the input variables instead of all variables."""
43
- vectorstore_kwargs: Optional[dict[str, Any]] = None
43
+ vectorstore_kwargs: dict[str, Any] | None = None
44
44
  """Extra arguments passed to similarity_search function of the vectorstore."""
45
45
 
46
46
  model_config = ConfigDict(
@@ -49,9 +49,7 @@ class _VectorStoreExampleSelector(BaseExampleSelector, BaseModel, ABC):
49
49
  )
50
50
 
51
51
  @staticmethod
52
- def _example_to_text(
53
- example: dict[str, str], input_keys: Optional[list[str]]
54
- ) -> str:
52
+ def _example_to_text(example: dict[str, str], input_keys: list[str] | None) -> str:
55
53
  if input_keys:
56
54
  return " ".join(sorted_values({key: example[key] for key in input_keys}))
57
55
  return " ".join(sorted_values(example))
@@ -142,10 +140,10 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
142
140
  embeddings: Embeddings,
143
141
  vectorstore_cls: type[VectorStore],
144
142
  k: int = 4,
145
- input_keys: Optional[list[str]] = None,
143
+ input_keys: list[str] | None = None,
146
144
  *,
147
- example_keys: Optional[list[str]] = None,
148
- vectorstore_kwargs: Optional[dict] = None,
145
+ example_keys: list[str] | None = None,
146
+ vectorstore_kwargs: dict | None = None,
149
147
  **vectorstore_cls_kwargs: Any,
150
148
  ) -> SemanticSimilarityExampleSelector:
151
149
  """Create k-shot example selector using example list and embeddings.
@@ -186,10 +184,10 @@ class SemanticSimilarityExampleSelector(_VectorStoreExampleSelector):
186
184
  embeddings: Embeddings,
187
185
  vectorstore_cls: type[VectorStore],
188
186
  k: int = 4,
189
- input_keys: Optional[list[str]] = None,
187
+ input_keys: list[str] | None = None,
190
188
  *,
191
- example_keys: Optional[list[str]] = None,
192
- vectorstore_kwargs: Optional[dict] = None,
189
+ example_keys: list[str] | None = None,
190
+ vectorstore_kwargs: dict | None = None,
193
191
  **vectorstore_cls_kwargs: Any,
194
192
  ) -> SemanticSimilarityExampleSelector:
195
193
  """Async create k-shot example selector using example list and embeddings.
@@ -273,10 +271,10 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
273
271
  embeddings: Embeddings,
274
272
  vectorstore_cls: type[VectorStore],
275
273
  k: int = 4,
276
- input_keys: Optional[list[str]] = None,
274
+ input_keys: list[str] | None = None,
277
275
  fetch_k: int = 20,
278
- example_keys: Optional[list[str]] = None,
279
- vectorstore_kwargs: Optional[dict] = None,
276
+ example_keys: list[str] | None = None,
277
+ vectorstore_kwargs: dict | None = None,
280
278
  **vectorstore_cls_kwargs: Any,
281
279
  ) -> MaxMarginalRelevanceExampleSelector:
282
280
  """Create k-shot example selector using example list and embeddings.
@@ -321,10 +319,10 @@ class MaxMarginalRelevanceExampleSelector(_VectorStoreExampleSelector):
321
319
  vectorstore_cls: type[VectorStore],
322
320
  *,
323
321
  k: int = 4,
324
- input_keys: Optional[list[str]] = None,
322
+ input_keys: list[str] | None = None,
325
323
  fetch_k: int = 20,
326
- example_keys: Optional[list[str]] = None,
327
- vectorstore_kwargs: Optional[dict] = None,
324
+ example_keys: list[str] | None = None,
325
+ vectorstore_kwargs: dict | None = None,
328
326
  **vectorstore_cls_kwargs: Any,
329
327
  ) -> MaxMarginalRelevanceExampleSelector:
330
328
  """Create k-shot example selector using example list and embeddings.
@@ -1,7 +1,7 @@
1
1
  """Custom **exceptions** for LangChain."""
2
2
 
3
3
  from enum import Enum
4
- from typing import Any, Optional
4
+ from typing import Any
5
5
 
6
6
 
7
7
  class LangChainException(Exception): # noqa: N818
@@ -24,8 +24,8 @@ class OutputParserException(ValueError, LangChainException): # noqa: N818
24
24
  def __init__(
25
25
  self,
26
26
  error: Any,
27
- observation: Optional[str] = None,
28
- llm_output: Optional[str] = None,
27
+ observation: str | None = None,
28
+ llm_output: str | None = None,
29
29
  send_to_llm: bool = False, # noqa: FBT001,FBT002
30
30
  ):
31
31
  """Create an OutputParserException.
langchain_core/globals.py CHANGED
@@ -1,18 +1,10 @@
1
1
  """Global values and configuration that apply to all of LangChain."""
2
2
 
3
- import warnings
4
3
  from typing import TYPE_CHECKING, Optional
5
4
 
6
5
  if TYPE_CHECKING:
7
6
  from langchain_core.caches import BaseCache
8
7
 
9
- try:
10
- import langchain # type: ignore[import-not-found]
11
-
12
- _HAS_LANGCHAIN = True
13
- except ImportError:
14
- _HAS_LANGCHAIN = False
15
-
16
8
 
17
9
  # DO NOT USE THESE VALUES DIRECTLY!
18
10
  # Use them only via `get_<X>()` and `set_<X>()` below,
@@ -29,26 +21,6 @@ def set_verbose(value: bool) -> None: # noqa: FBT001
29
21
  Args:
30
22
  value: The new value for the `verbose` global setting.
31
23
  """
32
- if _HAS_LANGCHAIN:
33
- # We're about to run some deprecated code, don't report warnings from it.
34
- # The user called the correct (non-deprecated) code path and shouldn't get
35
- # warnings.
36
- with warnings.catch_warnings():
37
- warnings.filterwarnings(
38
- "ignore",
39
- message=(
40
- "Importing verbose from langchain root module "
41
- "is no longer supported"
42
- ),
43
- )
44
- # N.B.: This is a workaround for an unfortunate quirk of Python's
45
- # module-level `__getattr__()` implementation:
46
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
47
- #
48
- # Remove it once `langchain.verbose` is no longer supported, and once all
49
- # users have migrated to using `set_verbose()` here.
50
- langchain.verbose = value
51
-
52
24
  global _verbose # noqa: PLW0603
53
25
  _verbose = value
54
26
 
@@ -59,35 +31,7 @@ def get_verbose() -> bool:
59
31
  Returns:
60
32
  The value of the `verbose` global setting.
61
33
  """
62
- if _HAS_LANGCHAIN:
63
- # We're about to run some deprecated code, don't report warnings from it.
64
- # The user called the correct (non-deprecated) code path and shouldn't get
65
- # warnings.
66
- with warnings.catch_warnings():
67
- warnings.filterwarnings(
68
- "ignore",
69
- message=(
70
- ".*Importing verbose from langchain root module "
71
- "is no longer supported"
72
- ),
73
- )
74
- # N.B.: This is a workaround for an unfortunate quirk of Python's
75
- # module-level `__getattr__()` implementation:
76
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
77
- #
78
- # Remove it once `langchain.verbose` is no longer supported, and once all
79
- # users have migrated to using `set_verbose()` here.
80
- #
81
- # In the meantime, the `verbose` setting is considered True if either the
82
- # old or the new value are True. This accommodates users who haven't
83
- # migrated to using `set_verbose()` yet. Those users are getting
84
- # deprecation warnings directing them to use `set_verbose()` when they
85
- # import `langchain.verbose`.
86
- old_verbose = langchain.verbose
87
- else:
88
- old_verbose = False
89
-
90
- return _verbose or old_verbose
34
+ return _verbose
91
35
 
92
36
 
93
37
  def set_debug(value: bool) -> None: # noqa: FBT001
@@ -96,24 +40,6 @@ def set_debug(value: bool) -> None: # noqa: FBT001
96
40
  Args:
97
41
  value: The new value for the `debug` global setting.
98
42
  """
99
- if _HAS_LANGCHAIN:
100
- # We're about to run some deprecated code, don't report warnings from it.
101
- # The user called the correct (non-deprecated) code path and shouldn't get
102
- # warnings.
103
- with warnings.catch_warnings():
104
- warnings.filterwarnings(
105
- "ignore",
106
- message="Importing debug from langchain root module "
107
- "is no longer supported",
108
- )
109
- # N.B.: This is a workaround for an unfortunate quirk of Python's
110
- # module-level `__getattr__()` implementation:
111
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
112
- #
113
- # Remove it once `langchain.debug` is no longer supported, and once all
114
- # users have migrated to using `set_debug()` here.
115
- langchain.debug = value
116
-
117
43
  global _debug # noqa: PLW0603
118
44
  _debug = value
119
45
 
@@ -124,32 +50,7 @@ def get_debug() -> bool:
124
50
  Returns:
125
51
  The value of the `debug` global setting.
126
52
  """
127
- if _HAS_LANGCHAIN:
128
- # We're about to run some deprecated code, don't report warnings from it.
129
- # The user called the correct (non-deprecated) code path and shouldn't get
130
- # warnings.
131
- with warnings.catch_warnings():
132
- warnings.filterwarnings(
133
- "ignore",
134
- message="Importing debug from langchain root module "
135
- "is no longer supported",
136
- )
137
- # N.B.: This is a workaround for an unfortunate quirk of Python's
138
- # module-level `__getattr__()` implementation:
139
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
140
- #
141
- # Remove it once `langchain.debug` is no longer supported, and once all
142
- # users have migrated to using `set_debug()` here.
143
- #
144
- # In the meantime, the `debug` setting is considered True if either the old
145
- # or the new value are True. This accommodates users who haven't migrated
146
- # to using `set_debug()` yet. Those users are getting deprecation warnings
147
- # directing them to use `set_debug()` when they import `langchain.debug`.
148
- old_debug = langchain.debug
149
- else:
150
- old_debug = False
151
-
152
- return _debug or old_debug
53
+ return _debug
153
54
 
154
55
 
155
56
  def set_llm_cache(value: Optional["BaseCache"]) -> None:
@@ -158,26 +59,6 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
158
59
  Args:
159
60
  value: The new LLM cache to use. If `None`, the LLM cache is disabled.
160
61
  """
161
- if _HAS_LANGCHAIN:
162
- # We're about to run some deprecated code, don't report warnings from it.
163
- # The user called the correct (non-deprecated) code path and shouldn't get
164
- # warnings.
165
- with warnings.catch_warnings():
166
- warnings.filterwarnings(
167
- "ignore",
168
- message=(
169
- "Importing llm_cache from langchain root module "
170
- "is no longer supported"
171
- ),
172
- )
173
- # N.B.: This is a workaround for an unfortunate quirk of Python's
174
- # module-level `__getattr__()` implementation:
175
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
176
- #
177
- # Remove it once `langchain.llm_cache` is no longer supported, and
178
- # once all users have migrated to using `set_llm_cache()` here.
179
- langchain.llm_cache = value
180
-
181
62
  global _llm_cache # noqa: PLW0603
182
63
  _llm_cache = value
183
64
 
@@ -188,33 +69,4 @@ def get_llm_cache() -> Optional["BaseCache"]:
188
69
  Returns:
189
70
  The value of the `llm_cache` global setting.
190
71
  """
191
- if _HAS_LANGCHAIN:
192
- # We're about to run some deprecated code, don't report warnings from it.
193
- # The user called the correct (non-deprecated) code path and shouldn't get
194
- # warnings.
195
- with warnings.catch_warnings():
196
- warnings.filterwarnings(
197
- "ignore",
198
- message=(
199
- "Importing llm_cache from langchain root module "
200
- "is no longer supported"
201
- ),
202
- )
203
- # N.B.: This is a workaround for an unfortunate quirk of Python's
204
- # module-level `__getattr__()` implementation:
205
- # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
206
- #
207
- # Remove it once `langchain.llm_cache` is no longer supported, and
208
- # once all users have migrated to using `set_llm_cache()` here.
209
- #
210
- # In the meantime, the `llm_cache` setting returns whichever of
211
- # its two backing sources is truthy (not `None` and non-empty),
212
- # or the old value if both are falsy. This accommodates users
213
- # who haven't migrated to using `set_llm_cache()` yet.
214
- # Those users are getting deprecation warnings directing them
215
- # to use `set_llm_cache()` when they import `langchain.llm_cache`.
216
- old_llm_cache = langchain.llm_cache
217
- else:
218
- old_llm_cache = None
219
-
220
- return _llm_cache or old_llm_cache
72
+ return _llm_cache