langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. See the package registry page for more details.

Files changed (142)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +10 -9
  7. langchain_core/caches.py +46 -56
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +43 -58
  16. langchain_core/document_loaders/base.py +21 -21
  17. langchain_core/document_loaders/langsmith.py +22 -22
  18. langchain_core/documents/__init__.py +0 -1
  19. langchain_core/documents/base.py +46 -49
  20. langchain_core/documents/transformers.py +28 -29
  21. langchain_core/embeddings/fake.py +50 -54
  22. langchain_core/example_selectors/semantic_similarity.py +4 -6
  23. langchain_core/exceptions.py +7 -8
  24. langchain_core/indexing/api.py +19 -25
  25. langchain_core/indexing/base.py +24 -24
  26. langchain_core/language_models/__init__.py +11 -27
  27. langchain_core/language_models/_utils.py +53 -54
  28. langchain_core/language_models/base.py +30 -24
  29. langchain_core/language_models/chat_models.py +123 -148
  30. langchain_core/language_models/fake_chat_models.py +7 -7
  31. langchain_core/language_models/llms.py +14 -16
  32. langchain_core/load/dump.py +3 -4
  33. langchain_core/load/load.py +7 -16
  34. langchain_core/load/serializable.py +37 -36
  35. langchain_core/messages/__init__.py +1 -16
  36. langchain_core/messages/ai.py +122 -123
  37. langchain_core/messages/base.py +31 -31
  38. langchain_core/messages/block_translators/__init__.py +17 -17
  39. langchain_core/messages/block_translators/anthropic.py +3 -3
  40. langchain_core/messages/block_translators/bedrock_converse.py +3 -3
  41. langchain_core/messages/block_translators/google_genai.py +5 -4
  42. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  43. langchain_core/messages/block_translators/groq.py +117 -21
  44. langchain_core/messages/block_translators/langchain_v0.py +3 -3
  45. langchain_core/messages/block_translators/openai.py +5 -5
  46. langchain_core/messages/chat.py +2 -6
  47. langchain_core/messages/content.py +222 -209
  48. langchain_core/messages/function.py +6 -10
  49. langchain_core/messages/human.py +17 -24
  50. langchain_core/messages/modifier.py +2 -2
  51. langchain_core/messages/system.py +12 -22
  52. langchain_core/messages/tool.py +53 -69
  53. langchain_core/messages/utils.py +399 -417
  54. langchain_core/output_parsers/__init__.py +1 -14
  55. langchain_core/output_parsers/base.py +46 -47
  56. langchain_core/output_parsers/json.py +3 -4
  57. langchain_core/output_parsers/list.py +2 -2
  58. langchain_core/output_parsers/openai_functions.py +46 -44
  59. langchain_core/output_parsers/openai_tools.py +11 -16
  60. langchain_core/output_parsers/pydantic.py +10 -11
  61. langchain_core/output_parsers/string.py +2 -2
  62. langchain_core/output_parsers/transform.py +2 -2
  63. langchain_core/output_parsers/xml.py +1 -1
  64. langchain_core/outputs/__init__.py +1 -1
  65. langchain_core/outputs/chat_generation.py +14 -14
  66. langchain_core/outputs/generation.py +6 -6
  67. langchain_core/outputs/llm_result.py +5 -5
  68. langchain_core/prompt_values.py +11 -11
  69. langchain_core/prompts/__init__.py +3 -23
  70. langchain_core/prompts/base.py +33 -38
  71. langchain_core/prompts/chat.py +222 -229
  72. langchain_core/prompts/dict.py +3 -3
  73. langchain_core/prompts/few_shot.py +76 -83
  74. langchain_core/prompts/few_shot_with_templates.py +7 -9
  75. langchain_core/prompts/image.py +12 -14
  76. langchain_core/prompts/loading.py +1 -1
  77. langchain_core/prompts/message.py +3 -3
  78. langchain_core/prompts/prompt.py +20 -23
  79. langchain_core/prompts/string.py +20 -8
  80. langchain_core/prompts/structured.py +26 -27
  81. langchain_core/rate_limiters.py +50 -58
  82. langchain_core/retrievers.py +41 -182
  83. langchain_core/runnables/base.py +565 -597
  84. langchain_core/runnables/branch.py +8 -8
  85. langchain_core/runnables/config.py +37 -44
  86. langchain_core/runnables/configurable.py +9 -10
  87. langchain_core/runnables/fallbacks.py +9 -9
  88. langchain_core/runnables/graph.py +46 -50
  89. langchain_core/runnables/graph_ascii.py +19 -18
  90. langchain_core/runnables/graph_mermaid.py +20 -31
  91. langchain_core/runnables/graph_png.py +7 -7
  92. langchain_core/runnables/history.py +22 -22
  93. langchain_core/runnables/passthrough.py +11 -11
  94. langchain_core/runnables/retry.py +3 -3
  95. langchain_core/runnables/router.py +2 -2
  96. langchain_core/runnables/schema.py +33 -33
  97. langchain_core/runnables/utils.py +30 -34
  98. langchain_core/stores.py +72 -102
  99. langchain_core/sys_info.py +27 -29
  100. langchain_core/tools/__init__.py +1 -14
  101. langchain_core/tools/base.py +70 -71
  102. langchain_core/tools/convert.py +100 -104
  103. langchain_core/tools/render.py +9 -9
  104. langchain_core/tools/retriever.py +7 -7
  105. langchain_core/tools/simple.py +6 -7
  106. langchain_core/tools/structured.py +18 -24
  107. langchain_core/tracers/__init__.py +1 -9
  108. langchain_core/tracers/base.py +35 -35
  109. langchain_core/tracers/context.py +12 -17
  110. langchain_core/tracers/event_stream.py +3 -3
  111. langchain_core/tracers/langchain.py +8 -8
  112. langchain_core/tracers/log_stream.py +17 -18
  113. langchain_core/tracers/memory_stream.py +3 -3
  114. langchain_core/tracers/root_listeners.py +2 -2
  115. langchain_core/tracers/schemas.py +0 -129
  116. langchain_core/tracers/stdout.py +1 -2
  117. langchain_core/utils/__init__.py +1 -1
  118. langchain_core/utils/aiter.py +32 -32
  119. langchain_core/utils/env.py +5 -5
  120. langchain_core/utils/function_calling.py +59 -154
  121. langchain_core/utils/html.py +4 -4
  122. langchain_core/utils/input.py +3 -3
  123. langchain_core/utils/interactive_env.py +1 -1
  124. langchain_core/utils/iter.py +20 -20
  125. langchain_core/utils/json.py +1 -1
  126. langchain_core/utils/json_schema.py +2 -2
  127. langchain_core/utils/mustache.py +5 -5
  128. langchain_core/utils/pydantic.py +17 -17
  129. langchain_core/utils/strings.py +5 -5
  130. langchain_core/utils/utils.py +25 -28
  131. langchain_core/vectorstores/base.py +55 -87
  132. langchain_core/vectorstores/in_memory.py +83 -85
  133. langchain_core/vectorstores/utils.py +2 -2
  134. langchain_core/version.py +1 -1
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
  136. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  137. langchain_core/memory.py +0 -120
  138. langchain_core/pydantic_v1/__init__.py +0 -30
  139. langchain_core/pydantic_v1/dataclasses.py +0 -23
  140. langchain_core/pydantic_v1/main.py +0 -23
  141. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  142. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -3,21 +3,7 @@
3
3
  One of the most common ways to store and search over unstructured data is to
4
4
  embed it and store the resulting embedding vectors, and then query the store
5
5
  and retrieve the data that are 'most similar' to the embedded query.
6
-
7
- **Class hierarchy:**
8
-
9
- .. code-block::
10
-
11
- VectorStore --> <name> # Examples: Annoy, FAISS, Milvus
12
-
13
- BaseRetriever --> VectorStoreRetriever --> <name>Retriever # Example: VespaRetriever
14
-
15
- **Main helpers:**
16
-
17
- .. code-block::
18
-
19
- Embeddings, Document
20
- """ # noqa: E501
6
+ """
21
7
 
22
8
  from __future__ import annotations
23
9
 
@@ -123,12 +109,11 @@ class VectorStore(ABC):
123
109
  """Delete by vector ID or other criteria.
124
110
 
125
111
  Args:
126
- ids: List of ids to delete. If None, delete all. Default is None.
112
+ ids: List of ids to delete. If `None`, delete all.
127
113
  **kwargs: Other keyword arguments that subclasses might use.
128
114
 
129
115
  Returns:
130
- Optional[bool]: True if deletion is successful,
131
- False otherwise, None if not implemented.
116
+ True if deletion is successful, False otherwise, None if not implemented.
132
117
  """
133
118
  msg = "delete method must be implemented by subclass."
134
119
  raise NotImplementedError(msg)
@@ -191,12 +176,11 @@ class VectorStore(ABC):
191
176
  """Async delete by vector ID or other criteria.
192
177
 
193
178
  Args:
194
- ids: List of ids to delete. If None, delete all. Default is None.
179
+ ids: List of ids to delete. If `None`, delete all.
195
180
  **kwargs: Other keyword arguments that subclasses might use.
196
181
 
197
182
  Returns:
198
- Optional[bool]: True if deletion is successful,
199
- False otherwise, None if not implemented.
183
+ True if deletion is successful, False otherwise, None if not implemented.
200
184
  """
201
185
  return await run_in_executor(None, self.delete, ids, **kwargs)
202
186
 
@@ -213,7 +197,6 @@ class VectorStore(ABC):
213
197
  Args:
214
198
  texts: Iterable of strings to add to the vectorstore.
215
199
  metadatas: Optional list of metadatas associated with the texts.
216
- Default is None.
217
200
  ids: Optional list
218
201
  **kwargs: vectorstore specific parameters.
219
202
 
@@ -255,7 +238,7 @@ class VectorStore(ABC):
255
238
 
256
239
  Args:
257
240
  documents: Documents to add to the vectorstore.
258
- kwargs: Additional keyword arguments.
241
+ **kwargs: Additional keyword arguments.
259
242
  if kwargs contains ids and documents contain ids,
260
243
  the ids in the kwargs will receive precedence.
261
244
 
@@ -287,7 +270,7 @@ class VectorStore(ABC):
287
270
 
288
271
  Args:
289
272
  documents: Documents to add to the vectorstore.
290
- kwargs: Additional keyword arguments.
273
+ **kwargs: Additional keyword arguments.
291
274
 
292
275
  Returns:
293
276
  List of IDs of the added texts.
@@ -381,7 +364,7 @@ class VectorStore(ABC):
381
364
 
382
365
  Args:
383
366
  query: Input text.
384
- k: Number of Documents to return. Defaults to 4.
367
+ k: Number of Documents to return.
385
368
  **kwargs: Arguments to pass to the search method.
386
369
 
387
370
  Returns:
@@ -478,7 +461,7 @@ class VectorStore(ABC):
478
461
 
479
462
  Args:
480
463
  query: Input text.
481
- k: Number of Documents to return. Defaults to 4.
464
+ k: Number of Documents to return.
482
465
  **kwargs: kwargs to be passed to similarity search. Should include:
483
466
  score_threshold: Optional, a floating point value between 0 to 1 to
484
467
  filter the resulting set of retrieved docs
@@ -505,7 +488,7 @@ class VectorStore(ABC):
505
488
 
506
489
  Args:
507
490
  query: Input text.
508
- k: Number of Documents to return. Defaults to 4.
491
+ k: Number of Documents to return.
509
492
  **kwargs: kwargs to be passed to similarity search. Should include:
510
493
  score_threshold: Optional, a floating point value between 0 to 1 to
511
494
  filter the resulting set of retrieved docs
@@ -529,7 +512,7 @@ class VectorStore(ABC):
529
512
 
530
513
  Args:
531
514
  query: Input text.
532
- k: Number of Documents to return. Defaults to 4.
515
+ k: Number of Documents to return.
533
516
  **kwargs: kwargs to be passed to similarity search. Should include:
534
517
  score_threshold: Optional, a floating point value between 0 to 1 to
535
518
  filter the resulting set of retrieved docs.
@@ -578,7 +561,7 @@ class VectorStore(ABC):
578
561
 
579
562
  Args:
580
563
  query: Input text.
581
- k: Number of Documents to return. Defaults to 4.
564
+ k: Number of Documents to return.
582
565
  **kwargs: kwargs to be passed to similarity search. Should include:
583
566
  score_threshold: Optional, a floating point value between 0 to 1 to
584
567
  filter the resulting set of retrieved docs
@@ -622,7 +605,7 @@ class VectorStore(ABC):
622
605
 
623
606
  Args:
624
607
  query: Input text.
625
- k: Number of Documents to return. Defaults to 4.
608
+ k: Number of Documents to return.
626
609
  **kwargs: Arguments to pass to the search method.
627
610
 
628
611
  Returns:
@@ -640,7 +623,7 @@ class VectorStore(ABC):
640
623
 
641
624
  Args:
642
625
  embedding: Embedding to look up documents similar to.
643
- k: Number of Documents to return. Defaults to 4.
626
+ k: Number of Documents to return.
644
627
  **kwargs: Arguments to pass to the search method.
645
628
 
646
629
  Returns:
@@ -655,7 +638,7 @@ class VectorStore(ABC):
655
638
 
656
639
  Args:
657
640
  embedding: Embedding to look up documents similar to.
658
- k: Number of Documents to return. Defaults to 4.
641
+ k: Number of Documents to return.
659
642
  **kwargs: Arguments to pass to the search method.
660
643
 
661
644
  Returns:
@@ -683,13 +666,11 @@ class VectorStore(ABC):
683
666
 
684
667
  Args:
685
668
  query: Text to look up documents similar to.
686
- k: Number of Documents to return. Defaults to 4.
669
+ k: Number of Documents to return.
687
670
  fetch_k: Number of Documents to fetch to pass to MMR algorithm.
688
- Default is 20.
689
671
  lambda_mult: Number between 0 and 1 that determines the degree
690
672
  of diversity among the results with 0 corresponding
691
673
  to maximum diversity and 1 to minimum diversity.
692
- Defaults to 0.5.
693
674
  **kwargs: Arguments to pass to the search method.
694
675
 
695
676
  Returns:
@@ -712,13 +693,11 @@ class VectorStore(ABC):
712
693
 
713
694
  Args:
714
695
  query: Text to look up documents similar to.
715
- k: Number of Documents to return. Defaults to 4.
696
+ k: Number of Documents to return.
716
697
  fetch_k: Number of Documents to fetch to pass to MMR algorithm.
717
- Default is 20.
718
698
  lambda_mult: Number between 0 and 1 that determines the degree
719
699
  of diversity among the results with 0 corresponding
720
700
  to maximum diversity and 1 to minimum diversity.
721
- Defaults to 0.5.
722
701
  **kwargs: Arguments to pass to the search method.
723
702
 
724
703
  Returns:
@@ -752,13 +731,11 @@ class VectorStore(ABC):
752
731
 
753
732
  Args:
754
733
  embedding: Embedding to look up documents similar to.
755
- k: Number of Documents to return. Defaults to 4.
734
+ k: Number of Documents to return.
756
735
  fetch_k: Number of Documents to fetch to pass to MMR algorithm.
757
- Default is 20.
758
736
  lambda_mult: Number between 0 and 1 that determines the degree
759
737
  of diversity among the results with 0 corresponding
760
738
  to maximum diversity and 1 to minimum diversity.
761
- Defaults to 0.5.
762
739
  **kwargs: Arguments to pass to the search method.
763
740
 
764
741
  Returns:
@@ -781,13 +758,11 @@ class VectorStore(ABC):
781
758
 
782
759
  Args:
783
760
  embedding: Embedding to look up documents similar to.
784
- k: Number of Documents to return. Defaults to 4.
761
+ k: Number of Documents to return.
785
762
  fetch_k: Number of Documents to fetch to pass to MMR algorithm.
786
- Default is 20.
787
763
  lambda_mult: Number between 0 and 1 that determines the degree
788
764
  of diversity among the results with 0 corresponding
789
765
  to maximum diversity and 1 to minimum diversity.
790
- Defaults to 0.5.
791
766
  **kwargs: Arguments to pass to the search method.
792
767
 
793
768
  Returns:
@@ -815,10 +790,10 @@ class VectorStore(ABC):
815
790
  Args:
816
791
  documents: List of Documents to add to the vectorstore.
817
792
  embedding: Embedding function to use.
818
- kwargs: Additional keyword arguments.
793
+ **kwargs: Additional keyword arguments.
819
794
 
820
795
  Returns:
821
- VectorStore: VectorStore initialized from documents and embeddings.
796
+ VectorStore initialized from documents and embeddings.
822
797
  """
823
798
  texts = [d.page_content for d in documents]
824
799
  metadatas = [d.metadata for d in documents]
@@ -845,10 +820,10 @@ class VectorStore(ABC):
845
820
  Args:
846
821
  documents: List of Documents to add to the vectorstore.
847
822
  embedding: Embedding function to use.
848
- kwargs: Additional keyword arguments.
823
+ **kwargs: Additional keyword arguments.
849
824
 
850
825
  Returns:
851
- VectorStore: VectorStore initialized from documents and embeddings.
826
+ VectorStore initialized from documents and embeddings.
852
827
  """
853
828
  texts = [d.page_content for d in documents]
854
829
  metadatas = [d.metadata for d in documents]
@@ -880,12 +855,11 @@ class VectorStore(ABC):
880
855
  texts: Texts to add to the vectorstore.
881
856
  embedding: Embedding function to use.
882
857
  metadatas: Optional list of metadatas associated with the texts.
883
- Default is None.
884
858
  ids: Optional list of IDs associated with the texts.
885
- kwargs: Additional keyword arguments.
859
+ **kwargs: Additional keyword arguments.
886
860
 
887
861
  Returns:
888
- VectorStore: VectorStore initialized from texts and embeddings.
862
+ VectorStore initialized from texts and embeddings.
889
863
  """
890
864
 
891
865
  @classmethod
@@ -904,12 +878,11 @@ class VectorStore(ABC):
904
878
  texts: Texts to add to the vectorstore.
905
879
  embedding: Embedding function to use.
906
880
  metadatas: Optional list of metadatas associated with the texts.
907
- Default is None.
908
881
  ids: Optional list of IDs associated with the texts.
909
- kwargs: Additional keyword arguments.
882
+ **kwargs: Additional keyword arguments.
910
883
 
911
884
  Returns:
912
- VectorStore: VectorStore initialized from texts and embeddings.
885
+ VectorStore initialized from texts and embeddings.
913
886
  """
914
887
  if ids is not None:
915
888
  kwargs["ids"] = ids
@@ -930,12 +903,11 @@ class VectorStore(ABC):
930
903
  Args:
931
904
  **kwargs: Keyword arguments to pass to the search function.
932
905
  Can include:
933
- search_type (Optional[str]): Defines the type of search that
934
- the Retriever should perform.
935
- Can be "similarity" (default), "mmr", or
906
+ search_type: Defines the type of search that the Retriever should
907
+ perform. Can be "similarity" (default), "mmr", or
936
908
  "similarity_score_threshold".
937
- search_kwargs (Optional[Dict]): Keyword arguments to pass to the
938
- search function. Can include things like:
909
+ search_kwargs: Keyword arguments to pass to the search function. Can
910
+ include things like:
939
911
  k: Amount of documents to return (Default: 4)
940
912
  score_threshold: Minimum relevance threshold
941
913
  for similarity_score_threshold
@@ -946,39 +918,35 @@ class VectorStore(ABC):
946
918
  filter: Filter by document metadata
947
919
 
948
920
  Returns:
949
- VectorStoreRetriever: Retriever class for VectorStore.
921
+ Retriever class for VectorStore.
950
922
 
951
923
  Examples:
924
+ ```python
925
+ # Retrieve more documents with higher diversity
926
+ # Useful if your dataset has many similar documents
927
+ docsearch.as_retriever(
928
+ search_type="mmr", search_kwargs={"k": 6, "lambda_mult": 0.25}
929
+ )
952
930
 
953
- .. code-block:: python
954
-
955
- # Retrieve more documents with higher diversity
956
- # Useful if your dataset has many similar documents
957
- docsearch.as_retriever(
958
- search_type="mmr", search_kwargs={"k": 6, "lambda_mult": 0.25}
959
- )
960
-
961
- # Fetch more documents for the MMR algorithm to consider
962
- # But only return the top 5
963
- docsearch.as_retriever(
964
- search_type="mmr", search_kwargs={"k": 5, "fetch_k": 50}
965
- )
966
-
967
- # Only retrieve documents that have a relevance score
968
- # Above a certain threshold
969
- docsearch.as_retriever(
970
- search_type="similarity_score_threshold",
971
- search_kwargs={"score_threshold": 0.8},
972
- )
931
+ # Fetch more documents for the MMR algorithm to consider
932
+ # But only return the top 5
933
+ docsearch.as_retriever(search_type="mmr", search_kwargs={"k": 5, "fetch_k": 50})
973
934
 
974
- # Only get the single most similar document from the dataset
975
- docsearch.as_retriever(search_kwargs={"k": 1})
935
+ # Only retrieve documents that have a relevance score
936
+ # Above a certain threshold
937
+ docsearch.as_retriever(
938
+ search_type="similarity_score_threshold",
939
+ search_kwargs={"score_threshold": 0.8},
940
+ )
976
941
 
977
- # Use a filter to only retrieve documents from a specific paper
978
- docsearch.as_retriever(
979
- search_kwargs={"filter": {"paper_title": "GPT-4 Technical Report"}}
980
- )
942
+ # Only get the single most similar document from the dataset
943
+ docsearch.as_retriever(search_kwargs={"k": 1})
981
944
 
945
+ # Use a filter to only retrieve documents from a specific paper
946
+ docsearch.as_retriever(
947
+ search_kwargs={"filter": {"paper_title": "GPT-4 Technical Report"}}
948
+ )
949
+ ```
982
950
  """
983
951
  tags = kwargs.pop("tags", None) or [*self._get_retriever_tags()]
984
952
  return VectorStoreRetriever(vectorstore=self, tags=tags, **kwargs)
@@ -1012,7 +980,7 @@ class VectorStoreRetriever(BaseRetriever):
1012
980
  values: Values to validate.
1013
981
 
1014
982
  Returns:
1015
- Values: Validated values.
983
+ Validated values.
1016
984
 
1017
985
  Raises:
1018
986
  ValueError: If search_type is not one of the allowed search types.
@@ -38,126 +38,124 @@ class InMemoryVectorStore(VectorStore):
38
38
  Uses a dictionary, and computes cosine similarity for search using numpy.
39
39
 
40
40
  Setup:
41
- Install ``langchain-core``.
41
+ Install `langchain-core`.
42
42
 
43
- .. code-block:: bash
44
-
45
- pip install -U langchain-core
43
+ ```bash
44
+ pip install -U langchain-core
45
+ ```
46
46
 
47
47
  Key init args — indexing params:
48
48
  embedding_function: Embeddings
49
49
  Embedding function to use.
50
50
 
51
51
  Instantiate:
52
- .. code-block:: python
53
-
54
- from langchain_core.vectorstores import InMemoryVectorStore
55
- from langchain_openai import OpenAIEmbeddings
52
+ ```python
53
+ from langchain_core.vectorstores import InMemoryVectorStore
54
+ from langchain_openai import OpenAIEmbeddings
56
55
 
57
- vector_store = InMemoryVectorStore(OpenAIEmbeddings())
56
+ vector_store = InMemoryVectorStore(OpenAIEmbeddings())
57
+ ```
58
58
 
59
59
  Add Documents:
60
- .. code-block:: python
61
-
62
- from langchain_core.documents import Document
60
+ ```python
61
+ from langchain_core.documents import Document
63
62
 
64
- document_1 = Document(id="1", page_content="foo", metadata={"baz": "bar"})
65
- document_2 = Document(id="2", page_content="thud", metadata={"bar": "baz"})
66
- document_3 = Document(id="3", page_content="i will be deleted :(")
63
+ document_1 = Document(id="1", page_content="foo", metadata={"baz": "bar"})
64
+ document_2 = Document(id="2", page_content="thud", metadata={"bar": "baz"})
65
+ document_3 = Document(id="3", page_content="i will be deleted :(")
67
66
 
68
- documents = [document_1, document_2, document_3]
69
- vector_store.add_documents(documents=documents)
67
+ documents = [document_1, document_2, document_3]
68
+ vector_store.add_documents(documents=documents)
69
+ ```
70
70
 
71
71
  Inspect documents:
72
- .. code-block:: python
73
-
74
- top_n = 10
75
- for index, (id, doc) in enumerate(vector_store.store.items()):
76
- if index < top_n:
77
- # docs have keys 'id', 'vector', 'text', 'metadata'
78
- print(f"{id}: {doc['text']}")
79
- else:
80
- break
72
+ ```python
73
+ top_n = 10
74
+ for index, (id, doc) in enumerate(vector_store.store.items()):
75
+ if index < top_n:
76
+ # docs have keys 'id', 'vector', 'text', 'metadata'
77
+ print(f"{id}: {doc['text']}")
78
+ else:
79
+ break
80
+ ```
81
81
 
82
82
  Delete Documents:
83
- .. code-block:: python
84
-
85
- vector_store.delete(ids=["3"])
83
+ ```python
84
+ vector_store.delete(ids=["3"])
85
+ ```
86
86
 
87
87
  Search:
88
- .. code-block:: python
89
-
90
- results = vector_store.similarity_search(query="thud", k=1)
91
- for doc in results:
92
- print(f"* {doc.page_content} [{doc.metadata}]")
88
+ ```python
89
+ results = vector_store.similarity_search(query="thud", k=1)
90
+ for doc in results:
91
+ print(f"* {doc.page_content} [{doc.metadata}]")
92
+ ```
93
93
 
94
- .. code-block::
95
-
96
- * thud [{'bar': 'baz'}]
94
+ ```txt
95
+ * thud [{'bar': 'baz'}]
96
+ ```
97
97
 
98
98
  Search with filter:
99
- .. code-block:: python
100
-
101
- def _filter_function(doc: Document) -> bool:
102
- return doc.metadata.get("bar") == "baz"
103
-
104
-
105
- results = vector_store.similarity_search(
106
- query="thud", k=1, filter=_filter_function
107
- )
108
- for doc in results:
109
- print(f"* {doc.page_content} [{doc.metadata}]")
99
+ ```python
100
+ def _filter_function(doc: Document) -> bool:
101
+ return doc.metadata.get("bar") == "baz"
110
102
 
111
- .. code-block::
112
103
 
113
- * thud [{'bar': 'baz'}]
104
+ results = vector_store.similarity_search(
105
+ query="thud", k=1, filter=_filter_function
106
+ )
107
+ for doc in results:
108
+ print(f"* {doc.page_content} [{doc.metadata}]")
109
+ ```
114
110
 
111
+ ```txt
112
+ * thud [{'bar': 'baz'}]
113
+ ```
115
114
 
116
115
  Search with score:
117
- .. code-block:: python
118
-
119
- results = vector_store.similarity_search_with_score(query="qux", k=1)
120
- for doc, score in results:
121
- print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
122
-
123
- .. code-block::
116
+ ```python
117
+ results = vector_store.similarity_search_with_score(query="qux", k=1)
118
+ for doc, score in results:
119
+ print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
120
+ ```
124
121
 
125
- * [SIM=0.832268] foo [{'baz': 'bar'}]
122
+ ```txt
123
+ * [SIM=0.832268] foo [{'baz': 'bar'}]
124
+ ```
126
125
 
127
126
  Async:
128
- .. code-block:: python
129
-
130
- # add documents
131
- # await vector_store.aadd_documents(documents=documents)
132
-
133
- # delete documents
134
- # await vector_store.adelete(ids=["3"])
127
+ ```python
128
+ # add documents
129
+ # await vector_store.aadd_documents(documents=documents)
135
130
 
136
- # search
137
- # results = vector_store.asimilarity_search(query="thud", k=1)
131
+ # delete documents
132
+ # await vector_store.adelete(ids=["3"])
138
133
 
139
- # search with score
140
- results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
141
- for doc, score in results:
142
- print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
134
+ # search
135
+ # results = vector_store.asimilarity_search(query="thud", k=1)
143
136
 
144
- .. code-block::
137
+ # search with score
138
+ results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
139
+ for doc, score in results:
140
+ print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
141
+ ```
145
142
 
146
- * [SIM=0.832268] foo [{'baz': 'bar'}]
143
+ ```txt
144
+ * [SIM=0.832268] foo [{'baz': 'bar'}]
145
+ ```
147
146
 
148
147
  Use as Retriever:
149
- .. code-block:: python
150
-
151
- retriever = vector_store.as_retriever(
152
- search_type="mmr",
153
- search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
154
- )
155
- retriever.invoke("thud")
156
-
157
- .. code-block::
158
-
159
- [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
148
+ ```python
149
+ retriever = vector_store.as_retriever(
150
+ search_type="mmr",
151
+ search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
152
+ )
153
+ retriever.invoke("thud")
154
+ ```
160
155
 
156
+ ```txt
157
+ [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
158
+ ```
161
159
  """
162
160
 
163
161
  def __init__(self, embedding: Embeddings) -> None:
@@ -524,7 +522,7 @@ class InMemoryVectorStore(VectorStore):
524
522
  Args:
525
523
  path: The path to load the vector store from.
526
524
  embedding: The embedding to use.
527
- kwargs: Additional arguments to pass to the constructor.
525
+ **kwargs: Additional arguments to pass to the constructor.
528
526
 
529
527
  Returns:
530
528
  A VectorStore object.
@@ -112,8 +112,8 @@ def maximal_marginal_relevance(
112
112
  Args:
113
113
  query_embedding: The query embedding.
114
114
  embedding_list: A list of embeddings.
115
- lambda_mult: The lambda parameter for MMR. Default is 0.5.
116
- k: The number of embeddings to return. Default is 4.
115
+ lambda_mult: The lambda parameter for MMR.
116
+ k: The number of embeddings to return.
117
117
 
118
118
  Returns:
119
119
  A list of indices of the embeddings to return.
langchain_core/version.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """langchain-core version information and utilities."""
2
2
 
3
- VERSION = "1.0.0a8"
3
+ VERSION = "1.0.0rc2"
@@ -1,10 +1,13 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-core
3
- Version: 1.0.0a8
3
+ Version: 1.0.0rc2
4
4
  Summary: Building applications with LLMs through composability
5
- Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core
6
- Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true
7
- Project-URL: repository, https://github.com/langchain-ai/langchain
5
+ Project-URL: homepage, https://docs.langchain.com/
6
+ Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/core
7
+ Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=%22langchain-core%3D%3D1%22
8
+ Project-URL: twitter, https://x.com/LangChainAI
9
+ Project-URL: slack, https://www.langchain.com/join-community
10
+ Project-URL: reddit, https://www.reddit.com/r/LangChain/
8
11
  License: MIT
9
12
  Requires-Python: <4.0.0,>=3.10.0
10
13
  Requires-Dist: jsonpatch<2.0.0,>=1.33.0
@@ -18,8 +21,15 @@ Description-Content-Type: text/markdown
18
21
 
19
22
  # 🦜🍎️ LangChain Core
20
23
 
21
- [![PyPI - License](https://img.shields.io/pypi/l/langchain-core?style=flat-square)](https://opensource.org/licenses/MIT)
24
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain-core?label=%20)](https://pypi.org/project/langchain-core/#history)
25
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain-core)](https://opensource.org/licenses/MIT)
22
26
  [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-core)](https://pypistats.org/packages/langchain-core)
27
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
28
+
29
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
30
+
31
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
32
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
23
33
 
24
34
  ## Quick Install
25
35
 
@@ -27,16 +37,14 @@ Description-Content-Type: text/markdown
27
37
  pip install langchain-core
28
38
  ```
29
39
 
30
- ## What is it?
40
+ ## 🤔 What is this?
31
41
 
32
- LangChain Core contains the base abstractions that power the the LangChain ecosystem.
42
+ LangChain Core contains the base abstractions that power the LangChain ecosystem.
33
43
 
34
44
  These abstractions are designed to be as modular and simple as possible.
35
45
 
36
46
  The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
37
47
 
38
- For full documentation see the [API reference](https://reference.langchain.com/python/).
39
-
40
48
  ## ⛰️ Why build on top of LangChain Core?
41
49
 
42
50
  The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
@@ -45,12 +53,16 @@ The LangChain ecosystem is built on top of `langchain-core`. Some of the benefit
45
53
  - **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
46
54
  - **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.
47
55
 
56
+ ## 📖 Documentation
57
+
58
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).
59
+
48
60
  ## 📕 Releases & Versioning
49
61
 
50
- See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning Policy](https://docs.langchain.com/oss/python/versioning).
62
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
51
63
 
52
64
  ## 💁 Contributing
53
65
 
54
66
  As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
55
67
 
56
- For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing).
68
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).