langchain 0.2.12__py3-none-any.whl → 0.2.14__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- langchain/_api/module_import.py +2 -2
- langchain/agents/__init__.py +4 -3
- langchain/agents/agent.py +7 -11
- langchain/agents/agent_toolkits/__init__.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +114 -2
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +2 -7
- langchain/agents/agent_types.py +1 -1
- langchain/agents/chat/base.py +1 -1
- langchain/agents/conversational/base.py +1 -1
- langchain/agents/conversational_chat/base.py +1 -1
- langchain/agents/initialize.py +2 -2
- langchain/agents/json_chat/base.py +1 -1
- langchain/agents/loading.py +4 -4
- langchain/agents/mrkl/base.py +4 -4
- langchain/agents/openai_assistant/base.py +2 -2
- langchain/agents/openai_functions_agent/base.py +2 -2
- langchain/agents/openai_functions_multi_agent/base.py +2 -2
- langchain/agents/react/agent.py +1 -1
- langchain/agents/react/base.py +4 -4
- langchain/agents/self_ask_with_search/base.py +2 -2
- langchain/agents/structured_chat/base.py +3 -2
- langchain/agents/tools.py +2 -2
- langchain/agents/xml/base.py +2 -2
- langchain/chains/__init__.py +1 -0
- langchain/chains/api/base.py +121 -1
- langchain/chains/base.py +5 -7
- langchain/chains/combine_documents/map_reduce.py +2 -4
- langchain/chains/combine_documents/map_rerank.py +4 -6
- langchain/chains/combine_documents/reduce.py +1 -4
- langchain/chains/combine_documents/refine.py +2 -4
- langchain/chains/combine_documents/stuff.py +12 -4
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +5 -7
- langchain/chains/elasticsearch_database/base.py +16 -20
- langchain/chains/example_generator.py +3 -4
- langchain/chains/flare/base.py +1 -1
- langchain/chains/hyde/base.py +1 -4
- langchain/chains/llm.py +2 -4
- langchain/chains/llm_checker/base.py +12 -4
- langchain/chains/llm_math/base.py +2 -4
- langchain/chains/llm_summarization_checker/base.py +12 -4
- langchain/chains/loading.py +17 -0
- langchain/chains/mapreduce.py +12 -4
- langchain/chains/natbot/base.py +2 -4
- langchain/chains/openai_functions/__init__.py +2 -0
- langchain/chains/openai_functions/base.py +2 -2
- langchain/chains/openai_functions/citation_fuzzy_match.py +54 -1
- langchain/chains/openai_functions/extraction.py +2 -2
- langchain/chains/openai_functions/openapi.py +88 -1
- langchain/chains/openai_functions/qa_with_structure.py +19 -0
- langchain/chains/openai_functions/tagging.py +81 -0
- langchain/chains/openai_tools/extraction.py +1 -1
- langchain/chains/qa_with_sources/base.py +21 -4
- langchain/chains/qa_with_sources/loading.py +16 -0
- langchain/chains/query_constructor/base.py +8 -2
- langchain/chains/query_constructor/schema.py +0 -2
- langchain/chains/question_answering/chain.py +15 -0
- langchain/chains/retrieval_qa/base.py +30 -6
- langchain/chains/router/base.py +1 -4
- langchain/chains/router/embedding_router.py +1 -4
- langchain/chains/router/llm_router.py +76 -1
- langchain/chains/router/multi_prompt.py +76 -1
- langchain/chains/sequential.py +3 -7
- langchain/chains/structured_output/base.py +3 -3
- langchain/chat_models/base.py +8 -10
- langchain/evaluation/agents/trajectory_eval_chain.py +2 -4
- langchain/evaluation/comparison/eval_chain.py +2 -4
- langchain/evaluation/criteria/eval_chain.py +2 -4
- langchain/evaluation/embedding_distance/base.py +0 -2
- langchain/evaluation/parsing/json_schema.py +1 -1
- langchain/evaluation/qa/eval_chain.py +2 -7
- langchain/evaluation/schema.py +8 -8
- langchain/evaluation/scoring/eval_chain.py +2 -4
- langchain/evaluation/string_distance/base.py +4 -4
- langchain/hub.py +60 -26
- langchain/indexes/vectorstore.py +3 -7
- langchain/memory/entity.py +0 -2
- langchain/memory/summary.py +9 -0
- langchain/output_parsers/retry.py +1 -1
- langchain/retrievers/contextual_compression.py +0 -2
- langchain/retrievers/document_compressors/base.py +0 -2
- langchain/retrievers/document_compressors/chain_filter.py +1 -1
- langchain/retrievers/document_compressors/cohere_rerank.py +3 -5
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +1 -4
- langchain/retrievers/document_compressors/embeddings_filter.py +0 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +1 -1
- langchain/retrievers/multi_query.py +4 -2
- langchain/retrievers/re_phraser.py +1 -1
- langchain/retrievers/self_query/base.py +1 -3
- langchain/retrievers/time_weighted_retriever.py +0 -2
- langchain/tools/__init__.py +14 -5
- langchain/tools/render.py +0 -2
- langchain/tools/retriever.py +0 -4
- {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/METADATA +2 -2
- {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/RECORD +98 -98
- {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/LICENSE +0 -0
- {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/WHEEL +0 -0
- {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/entry_points.txt +0 -0
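Most of the changes below follow two recurring patterns: legacy chains and helpers gain @deprecated(since="0.2.13", removal="1.0", ...) decorators pointing at LCEL/LangGraph replacements, and pydantic Config blocks switch from Extra.forbid to the string form extra = "forbid". Not part of the diff: a minimal sketch for auditing an upgrade, assuming LangChainDeprecationWarning is exported from langchain_core._api as in current langchain-core releases.

# Turn LangChain deprecation warnings into errors (e.g. in a test suite) to
# find call sites that still use the newly deprecated chains after upgrading.
import warnings

from langchain_core._api import LangChainDeprecationWarning

warnings.simplefilter("error", LangChainDeprecationWarning)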
langchain/chains/llm_summarization_checker/base.py
CHANGED

@@ -6,10 +6,11 @@ import warnings
 from pathlib import Path
 from typing import Any, Dict, List, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts.prompt import PromptTemplate
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import root_validator
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -65,6 +66,15 @@ def _load_sequential_chain(
     return chain
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "See LangGraph guides for a variety of self-reflection and corrective "
+        "strategies for question-answering and other tasks: "
+        "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
+    ),
+    removal="1.0",
+)
 class LLMSummarizationCheckerChain(Chain):
     """Chain for question-answering with self-verification.
 
@@ -96,10 +106,8 @@ class LLMSummarizationCheckerChain(Chain):
     """Maximum number of times to check the assertions. Default to double-checking."""
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @root_validator(pre=True)
     def raise_deprecation(cls, values: Dict) -> Dict:
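Not part of the diff: the same Config rewrite (drop Extra.forbid, set extra = "forbid") recurs in several files below. A quick illustration of why it is behavior-preserving, assuming the langchain_core.pydantic_v1 shim behaves like pydantic v1, which coerces the string form to the Extra enum.

# Minimal sketch: pydantic v1 accepts the string "forbid" for Config.extra.
from langchain_core.pydantic_v1 import BaseModel, ValidationError


class Strict(BaseModel):
    x: int

    class Config:
        arbitrary_types_allowed = True
        extra = "forbid"  # same effect as `extra = Extra.forbid`


try:
    Strict(x=1, y=2)  # unexpected field
except ValidationError as err:
    print(err)  # reports that extra fields are not permitted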
langchain/chains/loading.py
CHANGED

@@ -7,6 +7,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Any, Union
 
 import yaml
+from langchain_core._api import deprecated
 from langchain_core.prompts.loading import (
     _load_output_parser,
     load_prompt,
@@ -649,6 +650,14 @@ type_to_loader_dict = {
 }
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "This function is deprecated and will be removed in langchain 1.0. "
+        "At that point chains must be imported from their respective modules."
+    ),
+    removal="1.0",
+)
 def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
     """Load chain from Config Dict."""
     if "_type" not in config:
@@ -662,6 +671,14 @@ def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
     return chain_loader(config, **kwargs)
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "This function is deprecated and will be removed in langchain 1.0. "
+        "At that point chains must be imported from their respective modules."
+    ),
+    removal="1.0",
+)
 def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
     """Unified method for loading a chain from LangChainHub or local fs."""
     if isinstance(path, str) and path.startswith("lc://"):
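Not part of the diff: per the new deprecation message, chains should be constructed from their own modules instead of being loaded from config files. A hedged sketch of that replacement pattern; the prompt text and model are illustrative.

from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

# Instead of load_chain("path/to/chain.json"), build the chain in code.
prompt = PromptTemplate.from_template("Summarize the following text:\n\n{text}")
chain = prompt | OpenAI(temperature=0)
summary = chain.invoke({"text": "LangChain 0.2.14 deprecates several legacy chains."})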
langchain/chains/mapreduce.py
CHANGED

@@ -8,11 +8,11 @@ from __future__ import annotations
 
 from typing import Any, Dict, List, Mapping, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import Extra
 from langchain_text_splitters import TextSplitter
 
 from langchain.chains import ReduceDocumentsChain
@@ -23,6 +23,16 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
 from langchain.chains.llm import LLMChain
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "Refer here for a recommended map-reduce implementation using langgraph: "
+        "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/. See also "
+        "migration guide: "
+        "https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain/"  # noqa: E501
+    ),
+)
 class MapReduceChain(Chain):
     """Map-reduce chain."""
 
@@ -68,10 +78,8 @@ class MapReduceChain(Chain):
     )
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
langchain/chains/natbot/base.py
CHANGED

@@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional
 
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import root_validator
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -48,10 +48,8 @@ class NatBotChain(Chain):
     output_key: str = "command"  #: :meta private:
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @root_validator(pre=True)
     def raise_deprecation(cls, values: Dict) -> Dict:
langchain/chains/openai_functions/__init__.py
CHANGED

@@ -6,6 +6,7 @@ from langchain.chains.openai_functions.base import (
 )
 from langchain.chains.openai_functions.citation_fuzzy_match import (
     create_citation_fuzzy_match_chain,
+    create_citation_fuzzy_match_runnable,
 )
 from langchain.chains.openai_functions.extraction import (
     create_extraction_chain,
@@ -32,6 +33,7 @@ __all__ = [
     "create_extraction_chain_pydantic",
     "create_extraction_chain",
     "create_citation_fuzzy_match_chain",
+    "create_citation_fuzzy_match_runnable",
     "create_qa_with_structure_chain",
     "create_qa_with_sources_chain",
     "create_structured_output_chain",
langchain/chains/openai_functions/base.py
CHANGED

@@ -43,7 +43,7 @@ __all__ = [
 ]
 
 
-@deprecated(since="0.1.1", removal="
+@deprecated(since="0.1.1", removal="1.0", alternative="create_openai_fn_runnable")
 def create_openai_fn_chain(
     functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
     llm: BaseLanguageModel,
@@ -145,7 +145,7 @@ def create_openai_fn_chain(
 
 
 @deprecated(
-    since="0.1.1", removal="
+    since="0.1.1", removal="1.0", alternative="ChatOpenAI.with_structured_output"
 )
 def create_structured_output_chain(
     output_schema: Union[Dict[str, Any], Type[BaseModel]],
langchain/chains/openai_functions/citation_fuzzy_match.py
CHANGED

@@ -1,10 +1,12 @@
 from typing import Iterator, List
 
-from langchain_core.language_models import BaseLanguageModel
+from langchain_core._api import deprecated
+from langchain_core.language_models import BaseChatModel, BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
 from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.runnables import Runnable
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -61,6 +63,57 @@ class QuestionAnswer(BaseModel):
     )
 
 
+def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
+    """Create a citation fuzzy match Runnable.
+
+    Example usage:
+
+        .. code-block:: python
+
+            from langchain.chains import create_citation_fuzzy_match_runnable
+            from langchain_openai import ChatOpenAI
+
+            llm = ChatOpenAI(model="gpt-4o-mini")
+
+            context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes."
+            question = "What color are Bob's eyes?"
+
+            chain = create_citation_fuzzy_match_runnable(llm)
+            chain.invoke({"question": question, "context": context})
+
+    Args:
+        llm: Language model to use for the chain. Must implement bind_tools.
+
+    Returns:
+        Runnable that can be used to answer questions with citations.
+    """
+    if llm.bind_tools is BaseChatModel.bind_tools:
+        raise ValueError(
+            "Language model must implement bind_tools to use this function."
+        )
+    prompt = ChatPromptTemplate(
+        [
+            SystemMessage(
+                "You are a world class algorithm to answer "
+                "questions with correct and exact citations."
+            ),
+            HumanMessagePromptTemplate.from_template(
+                "Answer question using the following context."
+                "\n\n{context}"
+                "\n\nQuestion: {question}"
+                "\n\nTips: Make sure to cite your sources, "
+                "and use the exact words from the context."
+            ),
+        ]
+    )
+    return prompt | llm.with_structured_output(QuestionAnswer)
+
+
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    alternative="create_citation_fuzzy_match_runnable",
+)
 def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
     """Create a citation fuzzy match chain.
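Not part of the diff: a short usage note on the new helper beyond the docstring example above. Because the runnable is prompt | llm.with_structured_output(QuestionAnswer), invoke returns a QuestionAnswer pydantic object rather than an LLMChain-style dict; the model name below is illustrative.

from langchain.chains.openai_functions.citation_fuzzy_match import (
    create_citation_fuzzy_match_runnable,
)
from langchain_openai import ChatOpenAI

chain = create_citation_fuzzy_match_runnable(ChatOpenAI(model="gpt-4o-mini"))
result = chain.invoke(
    {
        "question": "What color are Bob's eyes?",
        "context": "Alice has blue eyes. Bob has brown eyes.",
    }
)
print(result)  # a QuestionAnswer object whose answers carry their citations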
langchain/chains/openai_functions/extraction.py
CHANGED

@@ -58,7 +58,7 @@ Passage:
     "feedback here:"
     "<https://github.com/langchain-ai/langchain/discussions/18154>"
     ),
-    removal="
+    removal="1.0",
     alternative=(
         """
            from langchain_core.pydantic_v1 import BaseModel, Field
@@ -128,7 +128,7 @@ def create_extraction_chain(
     "feedback here:"
     "<https://github.com/langchain-ai/langchain/discussions/18154>"
     ),
-    removal="
+    removal="1.0",
     alternative=(
         """
            from langchain_core.pydantic_v1 import BaseModel, Field
langchain/chains/openai_functions/openapi.py
CHANGED

@@ -6,6 +6,7 @@ from collections import defaultdict
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
 
 import requests
+from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
@@ -242,6 +243,15 @@ class SimpleRequestChain(Chain):
         return {self.output_key: response}
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "This function is deprecated and will be removed in langchain 1.0. "
+        "See API reference for replacement: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"  # noqa: E501
+    ),
+    removal="1.0",
+)
 def get_openapi_chain(
     spec: Union[OpenAPISpec, str],
     llm: Optional[BaseLanguageModel] = None,
@@ -255,13 +265,90 @@ def get_openapi_chain(
 ) -> SequentialChain:
     """Create a chain for querying an API from a OpenAPI spec.
 
+    Note: this class is deprecated. See below for a replacement implementation.
+        The benefits of this implementation are:
+
+        - Uses LLM tool calling features to encourage properly-formatted API requests;
+        - Includes async support.
+
+        .. code-block:: python
+
+            from typing import Any
+
+            from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn
+            from langchain_community.utilities.openapi import OpenAPISpec
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_openai import ChatOpenAI
+
+            # Define API spec. Can be JSON or YAML
+            api_spec = \"\"\"
+            {
+              "openapi": "3.1.0",
+              "info": {
+                "title": "JSONPlaceholder API",
+                "version": "1.0.0"
+              },
+              "servers": [
+                {
+                  "url": "https://jsonplaceholder.typicode.com"
+                }
+              ],
+              "paths": {
+                "/posts": {
+                  "get": {
+                    "summary": "Get posts",
+                    "parameters": [
+                      {
+                        "name": "_limit",
+                        "in": "query",
+                        "required": false,
+                        "schema": {
+                          "type": "integer",
+                          "example": 2
+                        },
+                        "description": "Limit the number of results"
+                      }
+                    ]
+                  }
+                }
+              }
+            }
+            \"\"\"
+
+            parsed_spec = OpenAPISpec.from_text(api_spec)
+            openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec)
+            tools = [
+                {"type": "function", "function": fn}
+                for fn in openai_fns
+            ]
+
+            prompt = ChatPromptTemplate.from_template(
+                "Use the provided APIs to respond to this user query:\\n\\n{query}"
+            )
+            llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools)
+
+            def _execute_tool(message) -> Any:
+                if tool_calls := message.tool_calls:
+                    tool_call = message.tool_calls[0]
+                    response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"])
+                    response.raise_for_status()
+                    return response.json()
+                else:
+                    return message.content
+
+            chain = prompt | llm | _execute_tool
+
+    .. code-block:: python
+
+        response = chain.invoke({"query": "Get me top two posts."})
+
     Args:
         spec: OpenAPISpec or url/file/text string corresponding to one.
         llm: language model, should be an OpenAI function-calling model, e.g.
             `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
         prompt: Main prompt template to use.
         request_chain: Chain for taking the functions output and executing the request.
-    """
+    """  # noqa: E501
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
     except ImportError as e:
langchain/chains/openai_functions/qa_with_structure.py
CHANGED

@@ -1,5 +1,6 @@
 from typing import Any, List, Optional, Type, Union, cast
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_core.output_parsers import BaseLLMOutputParser
@@ -25,6 +26,15 @@ class AnswerWithSources(BaseModel):
     )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This function is deprecated. Refer to this guide on retrieval and question "
+        "answering with structured responses: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+    ),
+)
 def create_qa_with_structure_chain(
     llm: BaseLanguageModel,
     schema: Union[dict, Type[BaseModel]],
@@ -95,6 +105,15 @@ def create_qa_with_structure_chain(
     return chain
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This function is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+    ),
+)
 def create_qa_with_sources_chain(
     llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
 ) -> LLMChain:
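Not part of the diff: a hedged sketch of the pattern the deprecation messages point at, structuring sources in the model response with with_structured_output. The class, fields, model, and prompt below are illustrative.

from typing import List

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class CitedAnswer(BaseModel):
    """Answer the user question based only on the given sources, and cite them."""

    answer: str = Field(description="The answer to the user question")
    citations: List[int] = Field(description="Integer IDs of the sources used")


structured_llm = ChatOpenAI(model="gpt-4o-mini").with_structured_output(CitedAnswer)
structured_llm.invoke("Source [1]: Bob has brown eyes.\n\nWhat color are Bob's eyes?")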
langchain/chains/openai_functions/tagging.py
CHANGED

@@ -1,5 +1,6 @@
 from typing import Any, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers.openai_functions import (
     JsonOutputFunctionsParser,
@@ -29,6 +30,21 @@ Passage:
 """
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "See API reference for this function for replacement: "
+        "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain.html> "  # noqa: E501
+        "You can read more about `with_structured_output` here: "
+        "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "<https://github.com/langchain-ai/langchain/discussions/18154>"
+    ),
+    removal="1.0",
+)
 def create_tagging_chain(
     schema: dict,
     llm: BaseLanguageModel,
@@ -38,6 +54,32 @@ def create_tagging_chain(
     """Create a chain that extracts information from a passage
     based on a schema.
 
+    This function is deprecated. Please use `with_structured_output` instead.
+    See example usage below:
+
+        .. code-block:: python
+
+            from typing_extensions import Annotated, TypedDict
+            from langchain_anthropic import ChatAnthropic
+
+            class Joke(TypedDict):
+                \"\"\"Tagged joke.\"\"\"
+
+                setup: Annotated[str, ..., "The setup of the joke"]
+                punchline: Annotated[str, ..., "The punchline of the joke"]
+
+            # Or any other chat model that supports tools.
+            # Please reference to to the documentation of structured_output
+            # to see an up to date list of which models support
+            # with_structured_output.
+            model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
+            structured_llm = model.with_structured_output(Joke)
+            structured_llm.invoke(
+                "Why did the cat cross the road? To get to the other "
+                "side... and then lay down in the middle of it!"
+            )
+    Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
     Args:
         schema: The schema of the entities to extract.
         llm: The language model to use.
@@ -59,6 +101,21 @@ def create_tagging_chain(
     return chain
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "See API reference for this function for replacement: "
+        "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain_pydantic.html> "  # noqa: E501
+        "You can read more about `with_structured_output` here: "
+        "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "<https://github.com/langchain-ai/langchain/discussions/18154>"
+    ),
+    removal="1.0",
+)
 def create_tagging_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
@@ -68,6 +125,30 @@ def create_tagging_chain_pydantic(
     """Create a chain that extracts information from a passage
     based on a pydantic schema.
 
+    This function is deprecated. Please use `with_structured_output` instead.
+    See example usage below:
+
+        .. code-block:: python
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+            from langchain_anthropic import ChatAnthropic
+
+            class Joke(BaseModel):
+                setup: str = Field(description="The setup of the joke")
+                punchline: str = Field(description="The punchline to the joke")
+
+            # Or any other chat model that supports tools.
+            # Please reference to to the documentation of structured_output
+            # to see an up to date list of which models support
+            # with_structured_output.
+            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+            structured_llm = model.with_structured_output(Joke)
+            structured_llm.invoke(
+                "Why did the cat cross the road? To get to the other "
+                "side... and then lay down in the middle of it!"
+            )
+    Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
     Args:
         pydantic_schema: The pydantic schema of the entities to extract.
         llm: The language model to use.
langchain/chains/openai_tools/extraction.py
CHANGED

@@ -29,7 +29,7 @@ If a property is not present and is not required in the function parameters, do
     "feedback here:"
     "<https://github.com/langchain-ai/langchain/discussions/18154>"
     ),
-    removal="
+    removal="1.0",
     alternative=(
         """
            from langchain_core.pydantic_v1 import BaseModel, Field
langchain/chains/qa_with_sources/base.py
CHANGED

@@ -7,6 +7,7 @@ import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Tuple
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
@@ -14,7 +15,7 @@ from langchain_core.callbacks import (
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import root_validator
 
 from langchain.chains import ReduceDocumentsChain
 from langchain.chains.base import Chain
@@ -30,6 +31,15 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
 )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+    ),
+)
 class BaseQAWithSourcesChain(Chain, ABC):
     """Question answering chain with sources over documents."""
 
@@ -88,10 +98,8 @@ class BaseQAWithSourcesChain(Chain, ABC):
         return cls(combine_documents_chain=combine_documents_chain, **kwargs)
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
@@ -200,6 +208,15 @@ class BaseQAWithSourcesChain(Chain, ABC):
         return result
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+    ),
+)
 class QAWithSourcesChain(BaseQAWithSourcesChain):
     """Question answering with sources over documents."""
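Not part of the diff: a hedged sketch of the replacement pattern from the linked guide, composing retrieval QA from create_stuff_documents_chain and create_retrieval_chain instead of the deprecated *QAWithSourcesChain classes. The in-memory stand-in retriever, prompt, and model below are illustrative.

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

docs = [Document(page_content="Bob has brown eyes.", metadata={"source": "notes.txt"})]
retriever = RunnableLambda(lambda _: docs)  # stand-in for a real retriever

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer from the context and cite the sources:\n\n{context}"),
        ("human", "{input}"),
    ]
)
combine_docs_chain = create_stuff_documents_chain(ChatOpenAI(model="gpt-4o-mini"), prompt)
rag_chain = create_retrieval_chain(retriever, combine_docs_chain)
# The result dict contains "context" (the retrieved documents) and "answer".
rag_chain.invoke({"input": "What color are Bob's eyes?"})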
langchain/chains/qa_with_sources/loading.py
CHANGED

@@ -4,6 +4,7 @@ from __future__ import annotations
 
 from typing import Any, Mapping, Optional, Protocol
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
 
@@ -151,6 +152,21 @@ def _load_refine_chain(
     )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This function is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+        "\nSee also the following migration guides for replacements "
+        "based on `chain_type`:\n"
+        "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n"  # noqa: E501
+        "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n"  # noqa: E501
+        "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n"  # noqa: E501
+        "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n"  # noqa: E501
+    ),
+)
 def load_qa_with_sources_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
langchain/chains/query_constructor/base.py
CHANGED

@@ -5,6 +5,7 @@ from __future__ import annotations
 import json
 from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
 
+from langchain_core._api import deprecated
 from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
@@ -217,7 +218,7 @@ def get_query_constructor_prompt(
         enable_limit: Whether to enable the limit operator. Defaults to False.
         schema_prompt: Prompt for describing query schema. Should have string input
             variables allowed_comparators and allowed_operators.
-
+        kwargs: Additional named params to pass to FewShotPromptTemplate init.
 
     Returns:
         A prompt template that can be used to construct queries.
@@ -257,6 +258,11 @@ def get_query_constructor_prompt(
     )
 
 
+@deprecated(
+    since="0.2.13",
+    alternative="load_query_constructor_runnable",
+    removal="1.0",
+)
 def load_query_constructor_chain(
     llm: BaseLanguageModel,
     document_contents: str,
@@ -339,7 +345,7 @@ def load_query_constructor_runnable(
             variables allowed_comparators and allowed_operators.
         fix_invalid: Whether to fix invalid filter directives by ignoring invalid
             operators, comparators and attributes.
-
+        kwargs: Additional named params to pass to FewShotPromptTemplate init.
 
     Returns:
         A Runnable that can be used to construct queries.
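Not part of the diff: a hedged sketch of the alternative named by the new @deprecated decorator, load_query_constructor_runnable. The attribute metadata, model, and query below are illustrative.

from langchain.chains.query_constructor.base import load_query_constructor_runnable
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain_openai import ChatOpenAI

# Describe the searchable document collection and its filterable attributes.
attribute_info = [
    AttributeInfo(name="year", description="Release year", type="integer"),
    AttributeInfo(name="genre", description="Movie genre", type="string"),
]
query_constructor = load_query_constructor_runnable(
    ChatOpenAI(model="gpt-4o-mini", temperature=0),
    "Brief summaries of movies",
    attribute_info,
)
# Returns a structured query: the rewritten query text plus filter directives.
query_constructor.invoke({"query": "science fiction films released after 2010"})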
|