langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a7__py3-none-any.whl

This diff compares the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (131)
  1. langchain_core/_api/__init__.py +3 -3
  2. langchain_core/_api/beta_decorator.py +6 -6
  3. langchain_core/_api/deprecation.py +21 -29
  4. langchain_core/_api/path.py +3 -6
  5. langchain_core/_import_utils.py +2 -3
  6. langchain_core/agents.py +10 -11
  7. langchain_core/caches.py +7 -7
  8. langchain_core/callbacks/base.py +91 -91
  9. langchain_core/callbacks/file.py +11 -11
  10. langchain_core/callbacks/manager.py +86 -89
  11. langchain_core/callbacks/stdout.py +8 -8
  12. langchain_core/callbacks/usage.py +4 -4
  13. langchain_core/chat_history.py +1 -37
  14. langchain_core/document_loaders/base.py +2 -2
  15. langchain_core/document_loaders/langsmith.py +15 -15
  16. langchain_core/documents/base.py +16 -16
  17. langchain_core/documents/compressor.py +4 -4
  18. langchain_core/example_selectors/length_based.py +1 -1
  19. langchain_core/example_selectors/semantic_similarity.py +17 -19
  20. langchain_core/exceptions.py +3 -3
  21. langchain_core/globals.py +3 -151
  22. langchain_core/indexing/api.py +44 -43
  23. langchain_core/indexing/base.py +30 -30
  24. langchain_core/indexing/in_memory.py +3 -3
  25. langchain_core/language_models/_utils.py +5 -7
  26. langchain_core/language_models/base.py +18 -132
  27. langchain_core/language_models/chat_models.py +118 -227
  28. langchain_core/language_models/fake.py +11 -11
  29. langchain_core/language_models/fake_chat_models.py +35 -29
  30. langchain_core/language_models/llms.py +91 -201
  31. langchain_core/load/dump.py +1 -1
  32. langchain_core/load/load.py +11 -12
  33. langchain_core/load/mapping.py +2 -4
  34. langchain_core/load/serializable.py +2 -4
  35. langchain_core/messages/ai.py +17 -20
  36. langchain_core/messages/base.py +23 -25
  37. langchain_core/messages/block_translators/__init__.py +2 -5
  38. langchain_core/messages/block_translators/anthropic.py +3 -3
  39. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  40. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  41. langchain_core/messages/block_translators/openai.py +6 -6
  42. langchain_core/messages/content.py +120 -124
  43. langchain_core/messages/human.py +7 -7
  44. langchain_core/messages/system.py +7 -7
  45. langchain_core/messages/tool.py +24 -24
  46. langchain_core/messages/utils.py +67 -79
  47. langchain_core/output_parsers/base.py +12 -14
  48. langchain_core/output_parsers/json.py +4 -4
  49. langchain_core/output_parsers/list.py +3 -5
  50. langchain_core/output_parsers/openai_functions.py +3 -3
  51. langchain_core/output_parsers/openai_tools.py +3 -3
  52. langchain_core/output_parsers/pydantic.py +2 -2
  53. langchain_core/output_parsers/transform.py +13 -15
  54. langchain_core/output_parsers/xml.py +7 -9
  55. langchain_core/outputs/chat_generation.py +4 -4
  56. langchain_core/outputs/chat_result.py +1 -3
  57. langchain_core/outputs/generation.py +2 -2
  58. langchain_core/outputs/llm_result.py +5 -5
  59. langchain_core/prompts/__init__.py +1 -5
  60. langchain_core/prompts/base.py +10 -15
  61. langchain_core/prompts/chat.py +31 -82
  62. langchain_core/prompts/dict.py +2 -2
  63. langchain_core/prompts/few_shot.py +5 -5
  64. langchain_core/prompts/few_shot_with_templates.py +4 -4
  65. langchain_core/prompts/loading.py +3 -5
  66. langchain_core/prompts/prompt.py +4 -16
  67. langchain_core/prompts/string.py +2 -1
  68. langchain_core/prompts/structured.py +16 -23
  69. langchain_core/rate_limiters.py +3 -4
  70. langchain_core/retrievers.py +14 -14
  71. langchain_core/runnables/base.py +928 -1042
  72. langchain_core/runnables/branch.py +36 -40
  73. langchain_core/runnables/config.py +27 -35
  74. langchain_core/runnables/configurable.py +108 -124
  75. langchain_core/runnables/fallbacks.py +76 -72
  76. langchain_core/runnables/graph.py +39 -45
  77. langchain_core/runnables/graph_ascii.py +9 -11
  78. langchain_core/runnables/graph_mermaid.py +18 -19
  79. langchain_core/runnables/graph_png.py +8 -9
  80. langchain_core/runnables/history.py +114 -127
  81. langchain_core/runnables/passthrough.py +113 -139
  82. langchain_core/runnables/retry.py +43 -48
  83. langchain_core/runnables/router.py +23 -28
  84. langchain_core/runnables/schema.py +42 -44
  85. langchain_core/runnables/utils.py +28 -31
  86. langchain_core/stores.py +9 -13
  87. langchain_core/structured_query.py +8 -8
  88. langchain_core/tools/base.py +62 -115
  89. langchain_core/tools/convert.py +31 -35
  90. langchain_core/tools/render.py +1 -1
  91. langchain_core/tools/retriever.py +4 -4
  92. langchain_core/tools/simple.py +13 -17
  93. langchain_core/tools/structured.py +12 -15
  94. langchain_core/tracers/base.py +62 -64
  95. langchain_core/tracers/context.py +17 -35
  96. langchain_core/tracers/core.py +49 -53
  97. langchain_core/tracers/evaluation.py +11 -11
  98. langchain_core/tracers/event_stream.py +58 -60
  99. langchain_core/tracers/langchain.py +13 -13
  100. langchain_core/tracers/log_stream.py +22 -24
  101. langchain_core/tracers/root_listeners.py +14 -14
  102. langchain_core/tracers/run_collector.py +2 -4
  103. langchain_core/tracers/schemas.py +8 -8
  104. langchain_core/tracers/stdout.py +2 -1
  105. langchain_core/utils/__init__.py +0 -3
  106. langchain_core/utils/_merge.py +2 -2
  107. langchain_core/utils/aiter.py +24 -28
  108. langchain_core/utils/env.py +4 -4
  109. langchain_core/utils/function_calling.py +31 -41
  110. langchain_core/utils/html.py +3 -4
  111. langchain_core/utils/input.py +3 -3
  112. langchain_core/utils/iter.py +15 -19
  113. langchain_core/utils/json.py +3 -2
  114. langchain_core/utils/json_schema.py +6 -6
  115. langchain_core/utils/mustache.py +3 -5
  116. langchain_core/utils/pydantic.py +16 -18
  117. langchain_core/utils/usage.py +1 -1
  118. langchain_core/utils/utils.py +29 -29
  119. langchain_core/vectorstores/base.py +18 -21
  120. langchain_core/vectorstores/in_memory.py +14 -87
  121. langchain_core/vectorstores/utils.py +2 -2
  122. langchain_core/version.py +1 -1
  123. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -21
  124. langchain_core-1.0.0a7.dist-info/RECORD +176 -0
  125. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
  126. langchain_core/messages/block_translators/ollama.py +0 -47
  127. langchain_core/prompts/pipeline.py +0 -138
  128. langchain_core/tracers/langchain_v1.py +0 -31
  129. langchain_core/utils/loading.py +0 -35
  130. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  131. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
--- a/langchain_core/indexing/api.py
+++ b/langchain_core/indexing/api.py
@@ -6,16 +6,20 @@ import hashlib
 import json
 import uuid
 import warnings
-from collections.abc import AsyncIterable, AsyncIterator, Iterable, Iterator, Sequence
+from collections.abc import (
+    AsyncIterable,
+    AsyncIterator,
+    Callable,
+    Iterable,
+    Iterator,
+    Sequence,
+)
 from itertools import islice
 from typing import (
     Any,
-    Callable,
     Literal,
-    Optional,
     TypedDict,
     TypeVar,
-    Union,
     cast,
 )
 
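
This import shuffle sets the pattern for the whole release: Callable moves to collections.abc, and Union/Optional give way to PEP 604 unions. A minimal before/after sketch (the function name is illustrative, not taken from the diff):

    from collections.abc import Callable

    # Before: from typing import Callable, Optional, Union
    #     def assign(key: Union[str, Callable[[str], str], None]) -> Optional[str]: ...
    # After: PEP 604 unions, no typing imports needed for the union itself
    # (Python 3.10+, or older interpreters with `from __future__ import annotations`)
    def assign(key: str | Callable[[str], str] | None) -> str | None:
        return key if isinstance(key, str) else None
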
@@ -107,8 +111,8 @@ async def _abatch(size: int, iterable: AsyncIterable[T]) -> AsyncIterator[list[T
 
 
 def _get_source_id_assigner(
-    source_id_key: Union[str, Callable[[Document], str], None],
-) -> Callable[[Document], Union[str, None]]:
+    source_id_key: str | Callable[[Document], str] | None,
+) -> Callable[[Document], str | None]:
     """Get the source id from the document."""
     if source_id_key is None:
         return lambda _doc: None
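
Only the None branch of _get_source_id_assigner is visible in this hunk. The contract implied by the new signature is roughly the following sketch; the str and callable branches are inferred from the annotation, not shown in the diff:

    def get_source_id_assigner(source_id_key):
        if source_id_key is None:
            return lambda _doc: None  # shown in the hunk
        if isinstance(source_id_key, str):
            # inferred: treat a string as a metadata key to look up
            return lambda doc: doc.metadata[source_id_key]
        # inferred: a user-supplied callable is used as-is
        return source_id_key
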
@@ -162,9 +166,8 @@ def _calculate_hash(
 def _get_document_with_hash(
     document: Document,
     *,
-    key_encoder: Union[
-        Callable[[Document], str], Literal["sha1", "sha256", "sha512", "blake2b"]
-    ],
+    key_encoder: Callable[[Document], str]
+    | Literal["sha1", "sha256", "sha512", "blake2b"],
 ) -> Document:
     """Calculate a hash of the document, and assign it to the uid.
 
@@ -233,7 +236,7 @@ class _HashedDocument:
 
 
 def _delete(
-    vector_store: Union[VectorStore, DocumentIndex],
+    vector_store: VectorStore | DocumentIndex,
     ids: list[str],
 ) -> None:
     if isinstance(vector_store, VectorStore):
@@ -271,19 +274,18 @@ class IndexingResult(TypedDict):
 
 
 def index(
-    docs_source: Union[BaseLoader, Iterable[Document]],
+    docs_source: BaseLoader | Iterable[Document],
     record_manager: RecordManager,
-    vector_store: Union[VectorStore, DocumentIndex],
+    vector_store: VectorStore | DocumentIndex,
     *,
     batch_size: int = 100,
-    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
-    source_id_key: Union[str, Callable[[Document], str], None] = None,
+    cleanup: Literal["incremental", "full", "scoped_full"] | None = None,
+    source_id_key: str | Callable[[Document], str] | None = None,
     cleanup_batch_size: int = 1_000,
     force_update: bool = False,
-    key_encoder: Union[
-        Literal["sha1", "sha256", "sha512", "blake2b"], Callable[[Document], str]
-    ] = "sha1",
-    upsert_kwargs: Optional[dict[str, Any]] = None,
+    key_encoder: Literal["sha1", "sha256", "sha512", "blake2b"]
+    | Callable[[Document], str] = "sha1",
+    upsert_kwargs: dict[str, Any] | None = None,
 ) -> IndexingResult:
     """Index data from the loader into the vector store.
 
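
Call sites are unaffected by the retyping. A runnable sketch of index() against the in-memory implementations that ship with langchain-core (the namespace, embedding size, and documents here are arbitrary):

    from langchain_core.documents import Document
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.indexing import InMemoryRecordManager, index
    from langchain_core.vectorstores import InMemoryVectorStore

    record_manager = InMemoryRecordManager(namespace="demo")
    record_manager.create_schema()
    vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=8))
    docs = [
        Document(page_content="hello", metadata={"source": "a.txt"}),
        Document(page_content="world", metadata={"source": "b.txt"}),
    ]

    # cleanup and source_id_key take the same values as before; the result
    # reports num_added / num_updated / num_skipped / num_deleted.
    result = index(
        docs, record_manager, vector_store,
        cleanup="incremental", source_id_key="source",
    )
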
@@ -296,10 +298,10 @@ def index(
     For the time being, documents are indexed using their hashes, and users
     are not able to specify the uid of the document.
 
-    .. versionchanged:: 0.3.25
+    !!! warning "Behavior changed in 0.3.25"
         Added ``scoped_full`` cleanup mode.
 
-    .. important::
+    !!! important
 
        * In full mode, the loader should be returning
          the entire dataset, and not just a subset of the dataset.
@@ -350,7 +352,7 @@ def index(
             metadata. Default is "sha1".
             Other options include "blake2b", "sha256", and "sha512".
 
-            .. versionadded:: 0.3.66
+            !!! version-added "Added in version 0.3.66"
 
         key_encoder: Hashing algorithm to use for hashing the document.
             If not provided, a default encoder using SHA-1 will be used.
@@ -367,7 +369,7 @@ def index(
             method of the VectorStore or the upsert method of the DocumentIndex.
             For example, you can use this to specify a custom vector_field:
             upsert_kwargs={"vector_field": "embedding"}
-            .. versionadded:: 0.3.10
+            !!! version-added "Added in version 0.3.10"
 
     Returns:
         Indexing result which contains information about how many documents
@@ -462,13 +464,13 @@ def index(
         # Count documents removed by within-batch deduplication
         num_skipped += original_batch_size - len(hashed_docs)
 
-        source_ids: Sequence[Optional[str]] = [
+        source_ids: Sequence[str | None] = [
            source_id_assigner(hashed_doc) for hashed_doc in hashed_docs
        ]
 
        if cleanup in {"incremental", "scoped_full"}:
            # source ids are required.
-            for source_id, hashed_doc in zip(source_ids, hashed_docs):
+            for source_id, hashed_doc in zip(source_ids, hashed_docs, strict=False):
                if source_id is None:
                    msg = (
                        f"Source ids are required when cleanup mode is "
@@ -492,7 +494,7 @@ def index(
         docs_to_index = []
         uids_to_refresh = []
         seen_docs: set[str] = set()
-        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
+        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch, strict=False):
             hashed_id = cast("str", hashed_doc.id)
             if doc_exists:
                 if force_update:
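
Here and in the previous hunk, strict=False makes the truncating behavior of zip explicit rather than implicit, which is what linters such as Ruff's B905 rule ask for; runtime behavior is unchanged. For illustration:

    ids = ["a", "b"]
    docs = ["x", "y", "z"]
    list(zip(ids, docs, strict=False))  # [('a', 'x'), ('b', 'y')] - extras silently dropped
    # zip(ids, docs, strict=True) would raise ValueError on the length mismatch
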
@@ -563,7 +565,7 @@ def index(
     if cleanup == "full" or (
         cleanup == "scoped_full" and scoped_full_cleanup_source_ids
     ):
-        delete_group_ids: Optional[Sequence[str]] = None
+        delete_group_ids: Sequence[str] | None = None
         if cleanup == "scoped_full":
             delete_group_ids = list(scoped_full_cleanup_source_ids)
         while uids_to_delete := record_manager.list_keys(
@@ -591,7 +593,7 @@ async def _to_async_iterator(iterator: Iterable[T]) -> AsyncIterator[T]:
 
 
 async def _adelete(
-    vector_store: Union[VectorStore, DocumentIndex],
+    vector_store: VectorStore | DocumentIndex,
     ids: list[str],
 ) -> None:
     if isinstance(vector_store, VectorStore):
@@ -613,19 +615,18 @@ async def _adelete(
 
 
 async def aindex(
-    docs_source: Union[BaseLoader, Iterable[Document], AsyncIterator[Document]],
+    docs_source: BaseLoader | Iterable[Document] | AsyncIterator[Document],
     record_manager: RecordManager,
-    vector_store: Union[VectorStore, DocumentIndex],
+    vector_store: VectorStore | DocumentIndex,
     *,
     batch_size: int = 100,
-    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
-    source_id_key: Union[str, Callable[[Document], str], None] = None,
+    cleanup: Literal["incremental", "full", "scoped_full"] | None = None,
+    source_id_key: str | Callable[[Document], str] | None = None,
     cleanup_batch_size: int = 1_000,
     force_update: bool = False,
-    key_encoder: Union[
-        Literal["sha1", "sha256", "sha512", "blake2b"], Callable[[Document], str]
-    ] = "sha1",
-    upsert_kwargs: Optional[dict[str, Any]] = None,
+    key_encoder: Literal["sha1", "sha256", "sha512", "blake2b"]
+    | Callable[[Document], str] = "sha1",
+    upsert_kwargs: dict[str, Any] | None = None,
 ) -> IndexingResult:
     """Async index data from the loader into the vector store.
 
@@ -638,10 +639,10 @@ async def aindex(
     For the time being, documents are indexed using their hashes, and users
     are not able to specify the uid of the document.
 
-    .. versionchanged:: 0.3.25
+    !!! warning "Behavior changed in 0.3.25"
         Added ``scoped_full`` cleanup mode.
 
-    .. important::
+    !!! important
 
        * In full mode, the loader should be returning
          the entire dataset, and not just a subset of the dataset.
@@ -692,7 +693,7 @@ async def aindex(
             metadata. Default is "sha1".
             Other options include "blake2b", "sha256", and "sha512".
 
-            .. versionadded:: 0.3.66
+            !!! version-added "Added in version 0.3.66"
 
         key_encoder: Hashing algorithm to use for hashing the document.
             If not provided, a default encoder using SHA-1 will be used.
@@ -709,7 +710,7 @@ async def aindex(
             method of the VectorStore or the upsert method of the DocumentIndex.
             For example, you can use this to specify a custom vector_field:
             upsert_kwargs={"vector_field": "embedding"}
-            .. versionadded:: 0.3.10
+            !!! version-added "Added in version 0.3.10"
 
     Returns:
         Indexing result which contains information about how many documents
@@ -815,13 +816,13 @@ async def aindex(
         # Count documents removed by within-batch deduplication
         num_skipped += original_batch_size - len(hashed_docs)
 
-        source_ids: Sequence[Optional[str]] = [
+        source_ids: Sequence[str | None] = [
            source_id_assigner(doc) for doc in hashed_docs
        ]
 
        if cleanup in {"incremental", "scoped_full"}:
            # If the cleanup mode is incremental, source ids are required.
-            for source_id, hashed_doc in zip(source_ids, hashed_docs):
+            for source_id, hashed_doc in zip(source_ids, hashed_docs, strict=False):
                if source_id is None:
                    msg = (
                        f"Source ids are required when cleanup mode is "
@@ -845,7 +846,7 @@ async def aindex(
         docs_to_index: list[Document] = []
         uids_to_refresh = []
         seen_docs: set[str] = set()
-        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
+        for hashed_doc, doc_exists in zip(hashed_docs, exists_batch, strict=False):
             hashed_id = cast("str", hashed_doc.id)
             if doc_exists:
                 if force_update:
@@ -917,7 +918,7 @@ async def aindex(
     if cleanup == "full" or (
         cleanup == "scoped_full" and scoped_full_cleanup_source_ids
     ):
-        delete_group_ids: Optional[Sequence[str]] = None
+        delete_group_ids: Sequence[str] | None = None
         if cleanup == "scoped_full":
             delete_group_ids = list(scoped_full_cleanup_source_ids)
         while uids_to_delete := await record_manager.alist_keys(
--- a/langchain_core/indexing/base.py
+++ b/langchain_core/indexing/base.py
@@ -5,7 +5,7 @@ from __future__ import annotations
 import abc
 import time
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional, TypedDict
+from typing import TYPE_CHECKING, Any, TypedDict
 
 from typing_extensions import override
 
@@ -100,8 +100,8 @@ class RecordManager(ABC):
         self,
         keys: Sequence[str],
         *,
-        group_ids: Optional[Sequence[Optional[str]]] = None,
-        time_at_least: Optional[float] = None,
+        group_ids: Sequence[str | None] | None = None,
+        time_at_least: float | None = None,
     ) -> None:
         """Upsert records into the database.
 
@@ -128,8 +128,8 @@
         self,
         keys: Sequence[str],
         *,
-        group_ids: Optional[Sequence[Optional[str]]] = None,
-        time_at_least: Optional[float] = None,
+        group_ids: Sequence[str | None] | None = None,
+        time_at_least: float | None = None,
     ) -> None:
         """Asynchronously upsert records into the database.
 
@@ -177,10 +177,10 @@ class RecordManager(ABC):
     def list_keys(
         self,
         *,
-        before: Optional[float] = None,
-        after: Optional[float] = None,
-        group_ids: Optional[Sequence[str]] = None,
-        limit: Optional[int] = None,
+        before: float | None = None,
+        after: float | None = None,
+        group_ids: Sequence[str] | None = None,
+        limit: int | None = None,
     ) -> list[str]:
         """List records in the database based on the provided filters.
 
@@ -198,10 +198,10 @@ class RecordManager(ABC):
     async def alist_keys(
         self,
         *,
-        before: Optional[float] = None,
-        after: Optional[float] = None,
-        group_ids: Optional[Sequence[str]] = None,
-        limit: Optional[int] = None,
+        before: float | None = None,
+        after: float | None = None,
+        group_ids: Sequence[str] | None = None,
+        limit: int | None = None,
     ) -> list[str]:
         """Asynchronously list records in the database based on the provided filters.
 
@@ -233,7 +233,7 @@ class RecordManager(ABC):
 
 
 class _Record(TypedDict):
-    group_id: Optional[str]
+    group_id: str | None
     updated_at: float
 
 
@@ -270,8 +270,8 @@ class InMemoryRecordManager(RecordManager):
         self,
         keys: Sequence[str],
         *,
-        group_ids: Optional[Sequence[Optional[str]]] = None,
-        time_at_least: Optional[float] = None,
+        group_ids: Sequence[str | None] | None = None,
+        time_at_least: float | None = None,
     ) -> None:
         """Upsert records into the database.
 
@@ -307,8 +307,8 @@ class InMemoryRecordManager(RecordManager):
         self,
         keys: Sequence[str],
         *,
-        group_ids: Optional[Sequence[Optional[str]]] = None,
-        time_at_least: Optional[float] = None,
+        group_ids: Sequence[str | None] | None = None,
+        time_at_least: float | None = None,
     ) -> None:
         """Async upsert records into the database.
 
@@ -352,10 +352,10 @@ class InMemoryRecordManager(RecordManager):
     def list_keys(
         self,
         *,
-        before: Optional[float] = None,
-        after: Optional[float] = None,
-        group_ids: Optional[Sequence[str]] = None,
-        limit: Optional[int] = None,
+        before: float | None = None,
+        after: float | None = None,
+        group_ids: Sequence[str] | None = None,
+        limit: int | None = None,
     ) -> list[str]:
         """List records in the database based on the provided filters.
 
@@ -388,10 +388,10 @@ class InMemoryRecordManager(RecordManager):
     async def alist_keys(
         self,
         *,
-        before: Optional[float] = None,
-        after: Optional[float] = None,
-        group_ids: Optional[Sequence[str]] = None,
-        limit: Optional[int] = None,
+        before: float | None = None,
+        after: float | None = None,
+        group_ids: Sequence[str] | None = None,
+        limit: int | None = None,
     ) -> list[str]:
         """Async list records in the database based on the provided filters.
 
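
The optional filters keep their semantics; only the annotations changed. A small sketch with the in-memory implementation (keys and group ids are arbitrary):

    from langchain_core.indexing import InMemoryRecordManager

    manager = InMemoryRecordManager(namespace="demo")
    manager.create_schema()
    manager.update(["k1", "k2"], group_ids=["g1", None])
    manager.list_keys(group_ids=["g1"])  # expected: ['k1']
    manager.list_keys(limit=1)           # at most one key
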
@@ -485,7 +485,7 @@ class DeleteResponse(TypedDict, total=False):
     failed: Sequence[str]
     """The IDs that failed to be deleted.
 
-    .. warning::
+    !!! warning
        Deleting an ID that does not exist is **NOT** considered a failure.
     """
 
@@ -509,7 +509,7 @@ class DocumentIndex(BaseRetriever):
     2. Fetching document by ID.
     3. Searching for document using a query.
 
-    .. versionadded:: 0.2.29
+    !!! version-added "Added in version 0.2.29"
     """
 
     @abc.abstractmethod
@@ -564,7 +564,7 @@ class DocumentIndex(BaseRetriever):
         )
 
     @abc.abstractmethod
-    def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
+    def delete(self, ids: list[str] | None = None, **kwargs: Any) -> DeleteResponse:
         """Delete by IDs or other criteria.
 
         Calling delete without any input parameters should raise a ValueError!
@@ -581,7 +581,7 @@ class DocumentIndex(BaseRetriever):
         """
 
     async def adelete(
-        self, ids: Optional[list[str]] = None, **kwargs: Any
+        self, ids: list[str] | None = None, **kwargs: Any
     ) -> DeleteResponse:
         """Delete by IDs or other criteria. Async variant.
 
--- a/langchain_core/indexing/in_memory.py
+++ b/langchain_core/indexing/in_memory.py
@@ -3,7 +3,7 @@
 import operator
 import uuid
 from collections.abc import Sequence
-from typing import Any, Optional, cast
+from typing import Any, cast
 
 from pydantic import Field
 from typing_extensions import override
@@ -24,7 +24,7 @@ class InMemoryDocumentIndex(DocumentIndex):
     It provides a simple search API that returns documents by the number of
     counts the given query appears in the document.
 
-    .. versionadded:: 0.2.29
+    !!! version-added "Added in version 0.2.29"
     """
 
     store: dict[str, Document] = Field(default_factory=dict)
@@ -60,7 +60,7 @@ class InMemoryDocumentIndex(DocumentIndex):
         return UpsertResponse(succeeded=ok_ids, failed=[])
 
     @override
-    def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
+    def delete(self, ids: list[str] | None = None, **kwargs: Any) -> DeleteResponse:
         """Delete by IDs.
 
         Args:
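
A quick sketch of the DocumentIndex contract via the in-memory implementation (document contents are arbitrary):

    from langchain_core.documents import Document
    from langchain_core.indexing.in_memory import InMemoryDocumentIndex

    doc_index = InMemoryDocumentIndex()
    doc_index.upsert([Document(id="1", page_content="cats purr")])
    doc_index.delete(ids=["1"])  # returns a DeleteResponse
    # delete() with no ids should raise ValueError, per the documented contract
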
--- a/langchain_core/language_models/_utils.py
+++ b/langchain_core/language_models/_utils.py
@@ -3,10 +3,8 @@ from collections.abc import Sequence
 from typing import (
     TYPE_CHECKING,
     Literal,
-    Optional,
     TypedDict,
     TypeVar,
-    Union,
 )
 
 if TYPE_CHECKING:
@@ -17,7 +15,7 @@ from langchain_core.messages.content import (
 
 
 def is_openai_data_block(
-    block: dict, filter_: Union[Literal["image", "audio", "file"], None] = None
+    block: dict, filter_: Literal["image", "audio", "file"] | None = None
 ) -> bool:
     """Check whether a block contains multimodal data in OpenAI Chat Completions format.
 
@@ -88,7 +86,7 @@ class ParsedDataUri(TypedDict):
     mime_type: str
 
 
-def _parse_data_uri(uri: str) -> Optional[ParsedDataUri]:
+def _parse_data_uri(uri: str) -> ParsedDataUri | None:
     """Parse a data URI into its components.
 
     If parsing fails, return None. If either MIME type or data is missing, return None.
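
For context, _parse_data_uri handles URIs shaped like data:<mime>;base64,<payload>. A hypothetical re-implementation of that contract (the dict keys other than mime_type are guesses, since the rest of ParsedDataUri sits outside this hunk):

    import re

    def parse_data_uri(uri: str) -> dict | None:
        # Return None for anything that is not a well-formed base64 data URI.
        match = re.match(r"^data:(?P<mime_type>[^;,]+);base64,(?P<data>.+)$", uri)
        if match is None:
            return None
        return {"mime_type": match.group("mime_type"), "data": match.group("data")}

    parse_data_uri("data:image/png;base64,iVBORw0KGgo=")  # {'mime_type': 'image/png', ...}
    parse_data_uri("https://example.com/cat.png")          # None
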
@@ -142,13 +140,13 @@ def _normalize_messages(
       directly; this may change in the future
     - LangChain v0 standard content blocks for backward compatibility
 
-    .. versionchanged:: 1.0.0
+    !!! warning "Behavior changed in 1.0.0"
         In previous versions, this function returned messages in LangChain v0 format.
         Now, it returns messages in LangChain v1 format, which upgraded chat models now
         expect to receive when passing back in message history. For backward
         compatibility, this function will convert v0 message content to v1 format.
 
-    .. dropdown:: v0 Content Block Schemas
+    ??? note "v0 Content Block Schemas"
 
        ``URLContentBlock``:
 
@@ -304,7 +302,7 @@ def _ensure_message_copy(message: T, formatted_message: T) -> T:
 
 
 def _update_content_block(
-    formatted_message: "BaseMessage", idx: int, new_block: Union[ContentBlock, dict]
+    formatted_message: "BaseMessage", idx: int, new_block: ContentBlock | dict
 ) -> None:
     """Update a content block at the given index, handling type issues."""
     # Type ignore needed because: