langchain 0.2.12__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +5 -9
- langchain/agents/agent_toolkits/vectorstore/base.py +114 -2
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +0 -6
- langchain/agents/initialize.py +1 -1
- langchain/agents/loading.py +2 -2
- langchain/agents/mrkl/base.py +1 -1
- langchain/agents/openai_assistant/base.py +2 -2
- langchain/agents/openai_functions_agent/base.py +1 -1
- langchain/agents/openai_functions_multi_agent/base.py +1 -1
- langchain/chains/__init__.py +1 -0
- langchain/chains/api/base.py +121 -1
- langchain/chains/base.py +0 -2
- langchain/chains/combine_documents/map_reduce.py +2 -4
- langchain/chains/combine_documents/map_rerank.py +4 -6
- langchain/chains/combine_documents/reduce.py +1 -4
- langchain/chains/combine_documents/refine.py +2 -4
- langchain/chains/combine_documents/stuff.py +12 -4
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +4 -6
- langchain/chains/elasticsearch_database/base.py +16 -20
- langchain/chains/example_generator.py +3 -4
- langchain/chains/flare/base.py +1 -1
- langchain/chains/hyde/base.py +1 -4
- langchain/chains/llm.py +2 -4
- langchain/chains/llm_checker/base.py +12 -4
- langchain/chains/llm_math/base.py +2 -4
- langchain/chains/llm_summarization_checker/base.py +12 -4
- langchain/chains/loading.py +17 -0
- langchain/chains/mapreduce.py +12 -4
- langchain/chains/natbot/base.py +2 -4
- langchain/chains/openai_functions/__init__.py +2 -0
- langchain/chains/openai_functions/citation_fuzzy_match.py +54 -1
- langchain/chains/openai_functions/openapi.py +88 -1
- langchain/chains/openai_functions/qa_with_structure.py +19 -0
- langchain/chains/openai_functions/tagging.py +81 -0
- langchain/chains/qa_with_sources/base.py +21 -4
- langchain/chains/qa_with_sources/loading.py +16 -0
- langchain/chains/query_constructor/base.py +8 -2
- langchain/chains/query_constructor/schema.py +0 -2
- langchain/chains/question_answering/chain.py +15 -0
- langchain/chains/retrieval_qa/base.py +30 -6
- langchain/chains/router/base.py +1 -4
- langchain/chains/router/embedding_router.py +1 -4
- langchain/chains/router/llm_router.py +76 -1
- langchain/chains/router/multi_prompt.py +76 -1
- langchain/chains/sequential.py +3 -7
- langchain/chains/structured_output/base.py +1 -1
- langchain/chat_models/base.py +8 -10
- langchain/evaluation/agents/trajectory_eval_chain.py +2 -4
- langchain/evaluation/comparison/eval_chain.py +2 -4
- langchain/evaluation/criteria/eval_chain.py +2 -4
- langchain/evaluation/embedding_distance/base.py +0 -2
- langchain/evaluation/parsing/json_schema.py +1 -1
- langchain/evaluation/qa/eval_chain.py +2 -7
- langchain/evaluation/schema.py +8 -8
- langchain/evaluation/scoring/eval_chain.py +2 -4
- langchain/evaluation/string_distance/base.py +4 -4
- langchain/hub.py +60 -26
- langchain/indexes/vectorstore.py +3 -7
- langchain/memory/entity.py +0 -2
- langchain/memory/summary.py +9 -0
- langchain/output_parsers/retry.py +1 -1
- langchain/retrievers/contextual_compression.py +0 -2
- langchain/retrievers/document_compressors/base.py +0 -2
- langchain/retrievers/document_compressors/chain_filter.py +1 -1
- langchain/retrievers/document_compressors/cohere_rerank.py +2 -4
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +1 -4
- langchain/retrievers/document_compressors/embeddings_filter.py +0 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +1 -1
- langchain/retrievers/multi_query.py +4 -2
- langchain/retrievers/re_phraser.py +1 -1
- langchain/retrievers/self_query/base.py +1 -3
- langchain/retrievers/time_weighted_retriever.py +0 -2
- {langchain-0.2.12.dist-info → langchain-0.2.13.dist-info}/METADATA +2 -2
- {langchain-0.2.12.dist-info → langchain-0.2.13.dist-info}/RECORD +78 -78
- {langchain-0.2.12.dist-info → langchain-0.2.13.dist-info}/LICENSE +0 -0
- {langchain-0.2.12.dist-info → langchain-0.2.13.dist-info}/WHEEL +0 -0
- {langchain-0.2.12.dist-info → langchain-0.2.13.dist-info}/entry_points.txt +0 -0
langchain/chains/openai_functions/tagging.py
CHANGED

@@ -1,5 +1,6 @@
 from typing import Any, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers.openai_functions import (
     JsonOutputFunctionsParser,
@@ -29,6 +30,21 @@ Passage:
 """
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "See API reference for this function for replacement: "
+        "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain.html> "  # noqa: E501
+        "You can read more about `with_structured_output` here: "
+        "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "<https://github.com/langchain-ai/langchain/discussions/18154>"
+    ),
+    removal="1.0",
+)
 def create_tagging_chain(
     schema: dict,
     llm: BaseLanguageModel,
@@ -38,6 +54,32 @@ def create_tagging_chain(
     """Create a chain that extracts information from a passage
     based on a schema.
 
+    This function is deprecated. Please use `with_structured_output` instead.
+    See example usage below:
+
+        .. code-block:: python
+
+            from typing_extensions import Annotated, TypedDict
+            from langchain_anthropic import ChatAnthropic
+
+            class Joke(TypedDict):
+                \"\"\"Tagged joke.\"\"\"
+
+                setup: Annotated[str, ..., "The setup of the joke"]
+                punchline: Annotated[str, ..., "The punchline of the joke"]
+
+            # Or any other chat model that supports tools.
+            # Please reference to to the documentation of structured_output
+            # to see an up to date list of which models support
+            # with_structured_output.
+            model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
+            structured_llm = model.with_structured_output(Joke)
+            structured_llm.invoke(
+                "Why did the cat cross the road? To get to the other "
+                "side... and then lay down in the middle of it!"
+            )
+    Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
     Args:
         schema: The schema of the entities to extract.
         llm: The language model to use.
@@ -59,6 +101,21 @@ def create_tagging_chain(
     return chain
 
 
+@deprecated(
+    since="0.2.13",
+    message=(
+        "LangChain has introduced a method called `with_structured_output` that "
+        "is available on ChatModels capable of tool calling. "
+        "See API reference for this function for replacement: "
+        "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain_pydantic.html> "  # noqa: E501
+        "You can read more about `with_structured_output` here: "
+        "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+        "If you notice other issues, please provide "
+        "feedback here: "
+        "<https://github.com/langchain-ai/langchain/discussions/18154>"
+    ),
+    removal="1.0",
+)
 def create_tagging_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
@@ -68,6 +125,30 @@ def create_tagging_chain_pydantic(
     """Create a chain that extracts information from a passage
     based on a pydantic schema.
 
+    This function is deprecated. Please use `with_structured_output` instead.
+    See example usage below:
+
+        .. code-block:: python
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+            from langchain_anthropic import ChatAnthropic
+
+            class Joke(BaseModel):
+                setup: str = Field(description="The setup of the joke")
+                punchline: str = Field(description="The punchline to the joke")
+
+            # Or any other chat model that supports tools.
+            # Please reference to to the documentation of structured_output
+            # to see an up to date list of which models support
+            # with_structured_output.
+            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+            structured_llm = model.with_structured_output(Joke)
+            structured_llm.invoke(
+                "Why did the cat cross the road? To get to the other "
+                "side... and then lay down in the middle of it!"
+            )
+    Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
     Args:
         pydantic_schema: The pydantic schema of the entities to extract.
         llm: The language model to use.
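Both tagging helpers now warn at call time through the `@deprecated` decorator shown above. A minimal sketch of how a downstream project might surface or silence those warnings while migrating to `with_structured_output`; it assumes `LangChainDeprecationWarning` is importable from `langchain_core._api`, as in recent `langchain-core` releases:

    import warnings

    from langchain_core._api import LangChainDeprecationWarning

    # Turn the new deprecation warnings into errors (e.g. in CI) to find remaining
    # call sites, or ignore them temporarily while the migration lands.
    warnings.filterwarnings("error", category=LangChainDeprecationWarning)
    # warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)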
langchain/chains/qa_with_sources/base.py
CHANGED

@@ -7,6 +7,7 @@ import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Tuple
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
@@ -14,7 +15,7 @@ from langchain_core.callbacks import (
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import root_validator
 
 from langchain.chains import ReduceDocumentsChain
 from langchain.chains.base import Chain
@@ -30,6 +31,15 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
 )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+    ),
+)
 class BaseQAWithSourcesChain(Chain, ABC):
     """Question answering chain with sources over documents."""
 
@@ -88,10 +98,8 @@ class BaseQAWithSourcesChain(Chain, ABC):
         return cls(combine_documents_chain=combine_documents_chain, **kwargs)
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
@@ -200,6 +208,15 @@ class BaseQAWithSourcesChain(Chain, ABC):
         return result
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+    ),
+)
 class QAWithSourcesChain(BaseQAWithSourcesChain):
     """Question answering with sources over documents."""
 
langchain/chains/qa_with_sources/loading.py
CHANGED

@@ -4,6 +4,7 @@ from __future__ import annotations
 
 from typing import Any, Mapping, Optional, Protocol
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
 
@@ -151,6 +152,21 @@ def _load_refine_chain(
     )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This function is deprecated. Refer to this guide on retrieval and question "
+        "answering with sources: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+        "\nSee also the following migration guides for replacements "
+        "based on `chain_type`:\n"
+        "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n"  # noqa: E501
+        "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n"  # noqa: E501
+        "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n"  # noqa: E501
+        "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n"  # noqa: E501
+    ),
+)
 def load_qa_with_sources_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
langchain/chains/query_constructor/base.py
CHANGED

@@ -5,6 +5,7 @@ from __future__ import annotations
 import json
 from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
 
+from langchain_core._api import deprecated
 from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
@@ -217,7 +218,7 @@ def get_query_constructor_prompt(
         enable_limit: Whether to enable the limit operator. Defaults to False.
         schema_prompt: Prompt for describing query schema. Should have string input
             variables allowed_comparators and allowed_operators.
-
+        kwargs: Additional named params to pass to FewShotPromptTemplate init.
 
     Returns:
         A prompt template that can be used to construct queries.
@@ -257,6 +258,11 @@
 )
 
 
+@deprecated(
+    since="0.2.13",
+    alternative="load_query_constructor_runnable",
+    removal="1.0",
+)
 def load_query_constructor_chain(
     llm: BaseLanguageModel,
     document_contents: str,
@@ -339,7 +345,7 @@ def load_query_constructor_runnable(
             variables allowed_comparators and allowed_operators.
         fix_invalid: Whether to fix invalid filter directives by ignoring invalid
             operators, comparators and attributes.
-
+        kwargs: Additional named params to pass to FewShotPromptTemplate init.
 
     Returns:
         A Runnable that can be used to construct queries.
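The `alternative` named in the new decorator, `load_query_constructor_runnable`, lives in the same module and takes the same core arguments. A minimal sketch of the replacement call, assuming an existing chat model `llm` and using `AttributeInfo` from `langchain.chains.query_constructor.schema`:

    from langchain.chains.query_constructor.base import load_query_constructor_runnable
    from langchain.chains.query_constructor.schema import AttributeInfo

    attribute_info = [
        AttributeInfo(name="genre", description="The genre of the film", type="string"),
        AttributeInfo(name="year", description="Year the film was released", type="integer"),
    ]

    # Returns a Runnable that turns a natural-language question into a structured query.
    query_constructor = load_query_constructor_runnable(
        llm,  # any chat model instance, assumed to exist
        "Brief summary of a movie",
        attribute_info,
        enable_limit=True,
    )
    structured_query = query_constructor.invoke({"query": "films about dinosaurs from the 90s"})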
langchain/chains/question_answering/chain.py
CHANGED

@@ -2,6 +2,7 @@
 
 from typing import Any, Mapping, Optional, Protocol
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import BaseCallbackManager, Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
@@ -216,6 +217,20 @@ def _load_refine_chain(
     )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. See the following migration guides for replacements "
+        "based on `chain_type`:\n"
+        "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n"  # noqa: E501
+        "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n"  # noqa: E501
+        "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n"  # noqa: E501
+        "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n"  # noqa: E501
+        "\nSee also guides on retrieval and question-answering here: "
+        "https://python.langchain.com/v0.2/docs/how_to/#qa-with-rag"
+    ),
+)
 def load_qa_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
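For the default `chain_type="stuff"`, the linked migration guide points at `create_stuff_documents_chain`. A minimal sketch of that replacement, assuming an existing chat model `llm` and a list of `Document` objects `docs`:

    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "Answer the question using only this context:\n\n{context}"),
            ("human", "{question}"),
        ]
    )

    # The returned Runnable stuffs all documents into the {context} prompt variable.
    chain = create_stuff_documents_chain(llm, prompt)
    answer = chain.invoke({"context": docs, "question": "What does the document say about X?"})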
langchain/chains/retrieval_qa/base.py
CHANGED

@@ -16,7 +16,7 @@ from langchain_core.callbacks import (
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import PromptTemplate
-from langchain_core.pydantic_v1 import Extra, Field, root_validator
+from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.vectorstores import VectorStore
 
@@ -28,6 +28,15 @@ from langchain.chains.question_answering import load_qa_chain
 from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Use the `create_retrieval_chain` constructor "
+        "instead. See migration guide here: "
+        "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+    ),
+)
 class BaseRetrievalQA(Chain):
     """Base class for question-answering chains."""
 
@@ -39,11 +48,9 @@ class BaseRetrievalQA(Chain):
     """Return the source documents or not."""
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
-        arbitrary_types_allowed = True
         allow_population_by_field_name = True
+        arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
@@ -196,7 +203,15 @@ class BaseRetrievalQA(Chain):
         return {self.output_key: answer}
 
 
-@deprecated(
+@deprecated(
+    since="0.1.17",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Use the `create_retrieval_chain` constructor "
+        "instead. See migration guide here: "
+        "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+    ),
+)
 class RetrievalQA(BaseRetrievalQA):
     """Chain for question-answering against an index.
 
@@ -273,6 +288,15 @@ class RetrievalQA(BaseRetrievalQA):
         return "retrieval_qa"
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "This class is deprecated. Use the `create_retrieval_chain` constructor "
+        "instead. See migration guide here: "
+        "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+    ),
+)
 class VectorDBQA(BaseRetrievalQA):
     """Chain for question-answering against a vector database."""
 
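The replacement named in these messages, `create_retrieval_chain`, composes a retriever with a documents chain and also covers the QA-with-sources case above, since the retrieved documents come back under the `"context"` key. A minimal sketch, assuming an existing `llm` and `retriever`:

    from langchain.chains import create_retrieval_chain
    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "Answer based on the retrieved context:\n\n{context}"),
            ("human", "{input}"),
        ]
    )

    combine_docs_chain = create_stuff_documents_chain(llm, prompt)
    rag_chain = create_retrieval_chain(retriever, combine_docs_chain)

    result = rag_chain.invoke({"input": "What is Task Decomposition?"})
    result["answer"]   # the model's answer
    # result["context"] holds the retrieved Documents; their metadata can be cited as sources.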
langchain/chains/router/base.py
CHANGED

@@ -10,7 +10,6 @@ from langchain_core.callbacks import (
     CallbackManagerForChainRun,
     Callbacks,
 )
-from langchain_core.pydantic_v1 import Extra
 
 from langchain.chains.base import Chain
 
@@ -62,10 +61,8 @@ class MultiRouteChain(Chain):
     Defaults to False."""
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
langchain/chains/router/embedding_router.py
CHANGED

@@ -8,7 +8,6 @@ from langchain_core.callbacks import (
 )
 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import Extra
 from langchain_core.vectorstores import VectorStore
 
 from langchain.chains.router.base import RouterChain
@@ -21,10 +20,8 @@ class EmbeddingRouterChain(RouterChain):
     routing_keys: List[str] = ["query"]
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
langchain/chains/router/llm_router.py
CHANGED

@@ -4,6 +4,7 @@ from __future__ import annotations
 
 from typing import Any, Dict, List, Optional, Type, cast
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
@@ -19,8 +20,82 @@ from langchain.chains import LLMChain
 from langchain.chains.router.base import RouterChain
 
 
+@deprecated(
+    since="0.2.12",
+    removal="1.0",
+    message=(
+        "Use RunnableLambda to select from multiple prompt templates. See example "
+        "in API reference: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html"  # noqa: E501
+    ),
+)
 class LLMRouterChain(RouterChain):
-    """A router chain that uses an LLM chain to perform routing.
+    """A router chain that uses an LLM chain to perform routing.
+
+    This class is deprecated. See below for a replacement, which offers several
+    benefits, including streaming and batch support.
+
+    Below is an example implementation:
+
+        .. code-block:: python
+
+            from operator import itemgetter
+            from typing import Literal
+            from typing_extensions import TypedDict
+
+            from langchain_core.output_parsers import StrOutputParser
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+            from langchain_openai import ChatOpenAI
+
+            llm = ChatOpenAI(model="gpt-4o-mini")
+
+            prompt_1 = ChatPromptTemplate.from_messages(
+                [
+                    ("system", "You are an expert on animals."),
+                    ("human", "{query}"),
+                ]
+            )
+            prompt_2 = ChatPromptTemplate.from_messages(
+                [
+                    ("system", "You are an expert on vegetables."),
+                    ("human", "{query}"),
+                ]
+            )
+
+            chain_1 = prompt_1 | llm | StrOutputParser()
+            chain_2 = prompt_2 | llm | StrOutputParser()
+
+            route_system = "Route the user's query to either the animal or vegetable expert."
+            route_prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", route_system),
+                    ("human", "{query}"),
+                ]
+            )
+
+
+            class RouteQuery(TypedDict):
+                \"\"\"Route query to destination.\"\"\"
+                destination: Literal["animal", "vegetable"]
+
+
+            route_chain = (
+                route_prompt
+                | llm.with_structured_output(RouteQuery)
+                | itemgetter("destination")
+            )
+
+            chain = {
+                "destination": route_chain,  # "animal" or "vegetable"
+                "query": lambda x: x["query"],  # pass through input query
+            } | RunnableLambda(
+                # if animal, chain_1. otherwise, chain_2.
+                lambda x: chain_1 if x["destination"] == "animal" else chain_2,
+            )
+
+            chain.invoke({"query": "what color are carrots"})
+    """ # noqa: E501
 
     llm_chain: LLMChain
     """LLM chain used to perform routing"""
langchain/chains/router/multi_prompt.py
CHANGED

@@ -4,6 +4,7 @@ from __future__ import annotations
 
 from typing import Any, Dict, List, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import PromptTemplate
 
@@ -15,8 +16,82 @@ from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParse
 from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
 
 
+@deprecated(
+    since="0.2.12",
+    removal="1.0",
+    message=(
+        "Use RunnableLambda to select from multiple prompt templates. See example "
+        "in API reference: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html"  # noqa: E501
+    ),
+)
 class MultiPromptChain(MultiRouteChain):
-    """A multi-route chain that uses an LLM router chain to choose amongst prompts.
+    """A multi-route chain that uses an LLM router chain to choose amongst prompts.
+
+    This class is deprecated. See below for a replacement, which offers several
+    benefits, including streaming and batch support.
+
+    Below is an example implementation:
+
+        .. code-block:: python
+
+            from operator import itemgetter
+            from typing import Literal
+            from typing_extensions import TypedDict
+
+            from langchain_core.output_parsers import StrOutputParser
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+            from langchain_openai import ChatOpenAI
+
+            llm = ChatOpenAI(model="gpt-4o-mini")
+
+            prompt_1 = ChatPromptTemplate.from_messages(
+                [
+                    ("system", "You are an expert on animals."),
+                    ("human", "{query}"),
+                ]
+            )
+            prompt_2 = ChatPromptTemplate.from_messages(
+                [
+                    ("system", "You are an expert on vegetables."),
+                    ("human", "{query}"),
+                ]
+            )
+
+            chain_1 = prompt_1 | llm | StrOutputParser()
+            chain_2 = prompt_2 | llm | StrOutputParser()
+
+            route_system = "Route the user's query to either the animal or vegetable expert."
+            route_prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", route_system),
+                    ("human", "{query}"),
+                ]
+            )
+
+
+            class RouteQuery(TypedDict):
+                \"\"\"Route query to destination.\"\"\"
+                destination: Literal["animal", "vegetable"]
+
+
+            route_chain = (
+                route_prompt
+                | llm.with_structured_output(RouteQuery)
+                | itemgetter("destination")
+            )
+
+            chain = {
+                "destination": route_chain,  # "animal" or "vegetable"
+                "query": lambda x: x["query"],  # pass through input query
+            } | RunnableLambda(
+                # if animal, chain_1. otherwise, chain_2.
+                lambda x: chain_1 if x["destination"] == "animal" else chain_2,
+            )
+
+            chain.invoke({"query": "what color are carrots"})
+    """ # noqa: E501
 
     @property
     def output_keys(self) -> List[str]:
langchain/chains/sequential.py
CHANGED

@@ -6,7 +6,7 @@ from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
 )
-from langchain_core.pydantic_v1 import Extra, root_validator
+from langchain_core.pydantic_v1 import root_validator
 from langchain_core.utils.input import get_color_mapping
 
 from langchain.chains.base import Chain
@@ -21,10 +21,8 @@ class SequentialChain(Chain):
     return_all: bool = False
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
@@ -132,10 +130,8 @@ class SimpleSequentialChain(Chain):
     output_key: str = "output"  #: :meta private:
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
         arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def input_keys(self) -> List[str]:
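The recurring `Config` edit in this release (here and in the router, QA-with-sources, and retrieval chains above) swaps the `Extra` enum for its string value; under pydantic v1 the two spellings are equivalent, so behavior is unchanged. A small sketch illustrating the equivalence via the same `langchain_core.pydantic_v1` shim used in the diff:

    from langchain_core.pydantic_v1 import BaseModel, Extra, ValidationError


    class Old(BaseModel):
        x: int = 0

        class Config:
            extra = Extra.forbid  # enum spelling (pre-0.2.13 style)


    class New(BaseModel):
        x: int = 0

        class Config:
            extra = "forbid"  # string spelling (0.2.13 style)


    # Both configurations reject unknown fields in exactly the same way.
    for cls in (Old, New):
        try:
            cls(x=1, y=2)
        except ValidationError as err:
            print(cls.__name__, "rejected the extra field:", err.errors()[0]["msg"])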
langchain/chains/structured_output/base.py
CHANGED

@@ -224,7 +224,7 @@ def create_structured_output_runnable(
             structured outputs or a single one. If True and model does not return any
             structured outputs then chain output is None. If False and model does not
             return any structured outputs then chain output is an empty list.
-
+        kwargs: Additional named arguments.
 
     Returns:
         A runnable sequence that will return a structured output(s) matching the given
langchain/chat_models/base.py
CHANGED

@@ -347,16 +347,14 @@ def _init_chat_model_helper(
             _check_pkg("langchain_ollama")
             from langchain_ollama import ChatOllama
         except ImportError:
-
-
-
-
-
-
-
-
-                # an error related to langchain-ollama
-                _check_pkg("langchain_ollama")
+            # For backwards compatibility
+            try:
+                _check_pkg("langchain_community")
+                from langchain_community.chat_models import ChatOllama
+            except ImportError:
+                # If both langchain-ollama and langchain-community aren't available,
+                # raise an error related to langchain-ollama
+                _check_pkg("langchain_ollama")
 
         return ChatOllama(model=model, **kwargs)
     elif model_provider == "together":
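This helper backs `init_chat_model`; the rewritten block means an `ollama` provider request now falls back to the `langchain_community` implementation when `langchain-ollama` is not installed, and only raises once both are absent. A hedged sketch of the call path being changed, assuming a locally available Ollama model named `llama3`:

    from langchain.chat_models import init_chat_model

    # Prefers langchain_ollama.ChatOllama; with 0.2.13 it falls back to
    # langchain_community.chat_models.ChatOllama if langchain-ollama is missing,
    # and raises an import error mentioning langchain-ollama only if both are absent.
    llm = init_chat_model("llama3", model_provider="ollama", temperature=0)
    print(llm.invoke("Say hello in one word.").content)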
langchain/evaluation/agents/trajectory_eval_chain.py
CHANGED

@@ -28,7 +28,7 @@ from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.output_parsers import BaseOutputParser
-from langchain_core.pydantic_v1 import Extra, Field
+from langchain_core.pydantic_v1 import Field
 from langchain_core.tools import BaseTool
 
 from langchain.chains.llm import LLMChain
@@ -157,9 +157,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
     """DEPRECATED. Reasoning always returned."""
 
     class Config:
-
-
-        extra = Extra.ignore
+        extra = "ignore"
 
     @property
     def requires_reference(self) -> bool: