langchain 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +5 -9
- langchain/agents/agent_toolkits/vectorstore/base.py +114 -2
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +0 -6
- langchain/agents/chat/output_parser.py +2 -2
- langchain/agents/initialize.py +1 -1
- langchain/agents/loading.py +2 -2
- langchain/agents/mrkl/base.py +1 -1
- langchain/agents/openai_assistant/base.py +2 -2
- langchain/agents/openai_functions_agent/base.py +1 -1
- langchain/agents/openai_functions_multi_agent/base.py +1 -1
- langchain/agents/output_parsers/react_json_single_input.py +2 -2
- langchain/agents/structured_chat/output_parser.py +2 -2
- langchain/chains/__init__.py +1 -0
- langchain/chains/api/base.py +121 -1
- langchain/chains/base.py +0 -2
- langchain/chains/combine_documents/map_reduce.py +2 -4
- langchain/chains/combine_documents/map_rerank.py +4 -6
- langchain/chains/combine_documents/reduce.py +1 -4
- langchain/chains/combine_documents/refine.py +2 -4
- langchain/chains/combine_documents/stuff.py +12 -4
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +4 -6
- langchain/chains/elasticsearch_database/base.py +16 -20
- langchain/chains/example_generator.py +3 -4
- langchain/chains/flare/base.py +1 -1
- langchain/chains/hyde/base.py +1 -4
- langchain/chains/llm.py +2 -4
- langchain/chains/llm_checker/base.py +12 -4
- langchain/chains/llm_math/base.py +2 -4
- langchain/chains/llm_summarization_checker/base.py +12 -4
- langchain/chains/loading.py +17 -0
- langchain/chains/mapreduce.py +12 -4
- langchain/chains/natbot/base.py +2 -4
- langchain/chains/openai_functions/__init__.py +2 -0
- langchain/chains/openai_functions/citation_fuzzy_match.py +54 -1
- langchain/chains/openai_functions/openapi.py +88 -1
- langchain/chains/openai_functions/qa_with_structure.py +19 -0
- langchain/chains/openai_functions/tagging.py +81 -0
- langchain/chains/qa_with_sources/base.py +21 -4
- langchain/chains/qa_with_sources/loading.py +16 -0
- langchain/chains/query_constructor/base.py +8 -2
- langchain/chains/query_constructor/schema.py +0 -2
- langchain/chains/question_answering/chain.py +15 -0
- langchain/chains/retrieval_qa/base.py +30 -6
- langchain/chains/router/base.py +1 -4
- langchain/chains/router/embedding_router.py +1 -4
- langchain/chains/router/llm_router.py +76 -1
- langchain/chains/router/multi_prompt.py +76 -1
- langchain/chains/sequential.py +3 -7
- langchain/chains/structured_output/base.py +1 -1
- langchain/chat_models/base.py +26 -3
- langchain/evaluation/agents/trajectory_eval_chain.py +3 -6
- langchain/evaluation/comparison/eval_chain.py +2 -4
- langchain/evaluation/criteria/eval_chain.py +2 -4
- langchain/evaluation/embedding_distance/base.py +3 -4
- langchain/evaluation/parsing/json_schema.py +1 -1
- langchain/evaluation/qa/eval_chain.py +2 -7
- langchain/evaluation/schema.py +8 -8
- langchain/evaluation/scoring/eval_chain.py +2 -4
- langchain/evaluation/string_distance/base.py +7 -6
- langchain/hub.py +60 -26
- langchain/indexes/vectorstore.py +3 -7
- langchain/memory/buffer.py +2 -2
- langchain/memory/entity.py +0 -2
- langchain/memory/summary.py +12 -2
- langchain/memory/summary_buffer.py +2 -2
- langchain/output_parsers/combining.py +2 -2
- langchain/output_parsers/enum.py +2 -2
- langchain/output_parsers/fix.py +4 -5
- langchain/output_parsers/retry.py +3 -3
- langchain/retrievers/contextual_compression.py +0 -2
- langchain/retrievers/document_compressors/base.py +0 -2
- langchain/retrievers/document_compressors/chain_filter.py +1 -1
- langchain/retrievers/document_compressors/cohere_rerank.py +2 -4
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +1 -4
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -4
- langchain/retrievers/document_compressors/listwise_rerank.py +1 -1
- langchain/retrievers/multi_query.py +4 -2
- langchain/retrievers/re_phraser.py +1 -1
- langchain/retrievers/self_query/base.py +11 -3
- langchain/retrievers/time_weighted_retriever.py +0 -2
- {langchain-0.2.11.dist-info → langchain-0.2.13.dist-info}/METADATA +2 -2
- {langchain-0.2.11.dist-info → langchain-0.2.13.dist-info}/RECORD +86 -86
- {langchain-0.2.11.dist-info → langchain-0.2.13.dist-info}/LICENSE +0 -0
- {langchain-0.2.11.dist-info → langchain-0.2.13.dist-info}/WHEEL +0 -0
- {langchain-0.2.11.dist-info → langchain-0.2.13.dist-info}/entry_points.txt +0 -0
langchain/agents/agent.py
CHANGED
@@ -156,7 +156,7 @@ class BaseSingleActionAgent(BaseModel):
             llm: Language model to use.
             tools: Tools to use.
             callback_manager: Callback manager to use.
-
+            kwargs: Additional arguments.
 
         Returns:
             BaseSingleActionAgent: Agent object.
@@ -420,8 +420,6 @@ class RunnableAgent(BaseSingleActionAgent):
     """
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
     @property
@@ -530,8 +528,6 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
     """
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
     @property
@@ -939,7 +935,7 @@ class Agent(BaseSingleActionAgent):
             tools: Tools to use.
             callback_manager: Callback manager to use.
             output_parser: Output parser to use.
-
+            kwargs: Additional arguments.
 
         Returns:
             Agent: Agent object.
@@ -1110,7 +1106,7 @@ class AgentExecutor(Chain):
             agent: Agent to use.
             tools: Tools to use.
             callbacks: Callbacks to use.
-
+            kwargs: Additional arguments.
 
         Returns:
             AgentExecutor: Agent executor object.
@@ -1741,7 +1737,7 @@ class AgentExecutor(Chain):
         Args:
             input: Input to the agent.
            config: Config to use.
-
+            kwargs: Additional arguments.
 
         Yields:
            AddableDict: Addable dictionary.
@@ -1772,7 +1768,7 @@ class AgentExecutor(Chain):
         Args:
             input: Input to the agent.
            config: Config to use.
-
+            kwargs: Additional arguments.
 
         Yields:
            AddableDict: Addable dictionary.
langchain/agents/agent_toolkits/vectorstore/base.py
CHANGED
@@ -2,6 +2,7 @@
 
 from typing import Any, Dict, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks.base import BaseCallbackManager
 from langchain_core.language_models import BaseLanguageModel
 
@@ -15,6 +16,16 @@ from langchain.agents.mrkl.base import ZeroShotAgent
 from langchain.chains.llm import LLMChain
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "See API reference for this function for a replacement implementation: "
+        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html "  # noqa: E501
+        "Read more here on how to create agents that query vector stores: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+    ),
+)
 def create_vectorstore_agent(
     llm: BaseLanguageModel,
     toolkit: VectorStoreToolkit,
@@ -26,6 +37,44 @@ def create_vectorstore_agent(
 ) -> AgentExecutor:
     """Construct a VectorStore agent from an LLM and tools.
 
+    Note: this class is deprecated. See below for a replacement that uses tool
+    calling methods and LangGraph. Install LangGraph with:
+
+    .. code-block:: bash
+
+        pip install -U langgraph
+
+    .. code-block:: python
+
+        from langchain_core.tools import create_retriever_tool
+        from langchain_core.vectorstores import InMemoryVectorStore
+        from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+        from langgraph.prebuilt import create_react_agent
+
+        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+        vector_store = InMemoryVectorStore.from_texts(
+            [
+                "Dogs are great companions, known for their loyalty and friendliness.",
+                "Cats are independent pets that often enjoy their own space.",
+            ],
+            OpenAIEmbeddings(),
+        )
+
+        tool = create_retriever_tool(
+            vector_store.as_retriever(),
+            "pet_information_retriever",
+            "Fetches information about pets.",
+        )
+
+        agent = create_react_agent(llm, [tool])
+
+        for step in agent.stream(
+            {"messages": [("human", "What are dogs known for?")]},
+            stream_mode="values",
+        ):
+            step["messages"][-1].pretty_print()
+
     Args:
         llm (BaseLanguageModel): LLM that will be used by the agent
         toolkit (VectorStoreToolkit): Set of tools for the agent
@@ -33,7 +82,7 @@ def create_vectorstore_agent(
         prefix (str, optional): The prefix prompt for the agent. If not provided uses default PREFIX.
         verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
         agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
-
+        kwargs: Additional named parameters to pass to the ZeroShotAgent.
 
     Returns:
         AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response
@@ -56,6 +105,16 @@ def create_vectorstore_agent(
     )
 
 
+@deprecated(
+    since="0.2.13",
+    removal="1.0",
+    message=(
+        "See API reference for this function for a replacement implementation: "
+        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html "  # noqa: E501
+        "Read more here on how to create agents that query vector stores: "
+        "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+    ),
+)
 def create_vectorstore_router_agent(
     llm: BaseLanguageModel,
     toolkit: VectorStoreRouterToolkit,
@@ -67,6 +126,59 @@ def create_vectorstore_router_agent(
 ) -> AgentExecutor:
     """Construct a VectorStore router agent from an LLM and tools.
 
+    Note: this class is deprecated. See below for a replacement that uses tool
+    calling methods and LangGraph. Install LangGraph with:
+
+    .. code-block:: bash
+
+        pip install -U langgraph
+
+    .. code-block:: python
+
+        from langchain_core.tools import create_retriever_tool
+        from langchain_core.vectorstores import InMemoryVectorStore
+        from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+        from langgraph.prebuilt import create_react_agent
+
+        llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+        pet_vector_store = InMemoryVectorStore.from_texts(
+            [
+                "Dogs are great companions, known for their loyalty and friendliness.",
+                "Cats are independent pets that often enjoy their own space.",
+            ],
+            OpenAIEmbeddings(),
+        )
+
+        food_vector_store = InMemoryVectorStore.from_texts(
+            [
+                "Carrots are orange and delicious.",
+                "Apples are red and delicious.",
+            ],
+            OpenAIEmbeddings(),
+        )
+
+        tools = [
+            create_retriever_tool(
+                pet_vector_store.as_retriever(),
+                "pet_information_retriever",
+                "Fetches information about pets.",
+            ),
+            create_retriever_tool(
+                food_vector_store.as_retriever(),
+                "food_information_retriever",
+                "Fetches information about food.",
+            )
+        ]
+
+        agent = create_react_agent(llm, tools)
+
+        for step in agent.stream(
+            {"messages": [("human", "Tell me about carrots.")]},
+            stream_mode="values",
+        ):
+            step["messages"][-1].pretty_print()
+
     Args:
         llm (BaseLanguageModel): LLM that will be used by the agent
         toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
@@ -74,7 +186,7 @@ def create_vectorstore_router_agent(
         prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX.
         verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
         agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
-
+        kwargs: Additional named parameters to pass to the ZeroShotAgent.
 
     Returns:
         AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response.
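Both vectorstore agent constructors above are now wrapped with `langchain_core._api.deprecated`. As an illustrative sketch only (the function below is hypothetical, not from the package), this is roughly how calling a function decorated with those same arguments surfaces the deprecation metadata; the default warning category in `langchain_core` is, to my knowledge, `LangChainDeprecationWarning`, so treat that detail as an assumption.

```python
import warnings

from langchain_core._api import deprecated


# Hypothetical function for illustration; the decorator arguments mirror the
# ones added in the diff above.
@deprecated(since="0.2.13", removal="1.0", message="Use the LangGraph replacement instead.")
def legacy_helper() -> str:
    return "still callable, but deprecated"


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_helper()

# Assumption: langchain_core emits LangChainDeprecationWarning by default.
print(caught[0].category.__name__, "-", caught[0].message)
```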
langchain/agents/agent_toolkits/vectorstore/toolkit.py
CHANGED
@@ -16,8 +16,6 @@ class VectorStoreInfo(BaseModel):
     description: str
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
 
@@ -28,8 +26,6 @@ class VectorStoreToolkit(BaseToolkit):
     llm: BaseLanguageModel
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
     def get_tools(self) -> List[BaseTool]:
@@ -71,8 +67,6 @@ class VectorStoreRouterToolkit(BaseToolkit):
     llm: BaseLanguageModel
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
     def get_tools(self) -> List[BaseTool]:
langchain/agents/chat/output_parser.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Union
+from typing import Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -17,7 +17,7 @@ class ChatOutputParser(AgentOutputParser):
     format_instructions: str = FORMAT_INSTRUCTIONS
     """Default formatting instructions"""
 
-    pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
+    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
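The only change in the chat output parser above, and in the ReAct JSON and structured-chat parsers further down, is the explicit `Pattern` annotation on the pre-compiled regex class attribute. As a standalone sketch (not part of the diff; the sample response text is made up), this is what that regex extracts from a model reply:

```python
import re
from typing import Pattern

# Same annotated-attribute style the diff introduces, applied outside a class
# so the snippet runs without langchain installed.
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)

response = (
    "Thought: I can answer directly.\n"
    "```json\n"
    '{"action": "Final Answer", "action_input": "LangChain"}\n'
    "```"
)
match = pattern.search(response)
if match:
    # group(1) is the fenced JSON payload that the agent parsers go on to parse.
    print(match.group(1).strip())
```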
langchain/agents/initialize.py
CHANGED
@@ -45,7 +45,7 @@ def initialize_agent(
         agent_kwargs: Additional keyword arguments to pass to the underlying agent.
             Defaults to None.
         tags: Tags to apply to the traced runs. Defaults to None.
-
+        kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
         An agent executor.
langchain/agents/loading.py
CHANGED
@@ -44,7 +44,7 @@ def load_agent_from_config(
         config: Config dict to load agent from.
         llm: Language model to use as the agent.
         tools: List of tools this agent has access to.
-
+        kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
         An agent executor.
@@ -98,7 +98,7 @@ def load_agent(
 
     Args:
         path: Path to the agent file.
-
+        kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
         An agent executor.
langchain/agents/mrkl/base.py
CHANGED
@@ -128,7 +128,7 @@ class ZeroShotAgent(Agent):
             format_instructions: The format instructions to use.
                 Defaults to FORMAT_INSTRUCTIONS.
             input_variables: The input variables to use. Defaults to None.
-
+            kwargs: Additional parameters to pass to the agent.
         """
         cls._validate_tools(tools)
         prompt = cls.create_prompt(
langchain/agents/openai_assistant/base.py
CHANGED
@@ -261,7 +261,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             model: Assistant model to use.
             client: OpenAI or AzureOpenAI client.
                 Will create a default OpenAI client if not specified.
-
+            kwargs: Additional arguments.
 
         Returns:
             OpenAIAssistantRunnable configured to run using the created assistant.
@@ -418,7 +418,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             tools: Override Assistant tools for this run.
             run_metadata: Metadata to associate with new run.
             config: Runnable config. Defaults to None.
-
+            kwargs: Additional arguments.
 
         Return:
             If self.as_agent, will return
langchain/agents/openai_functions_agent/base.py
CHANGED
@@ -262,7 +262,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
             extra_prompt_messages: Extra prompt messages to use. Defaults to None.
             system_message: The system message to use.
                 Defaults to a default system message.
-
+            kwargs: Additional parameters to pass to the agent.
         """
         prompt = cls.create_prompt(
             extra_prompt_messages=extra_prompt_messages,
langchain/agents/openai_functions_multi_agent/base.py
CHANGED
@@ -307,7 +307,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
             extra_prompt_messages: Extra prompt messages to use. Default is None.
             system_message: The system message to use.
                 Default is a default system message.
-
+            kwargs: Additional arguments.
         """
         prompt = cls.create_prompt(
             extra_prompt_messages=extra_prompt_messages,
langchain/agents/output_parsers/react_json_single_input.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Union
+from typing import Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -42,7 +42,7 @@ class ReActJsonSingleInputOutputParser(AgentOutputParser):
 
     """
 
-    pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
+    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
langchain/agents/structured_chat/output_parser.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import logging
 import re
-from typing import Optional, Union
+from typing import Optional, Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -23,7 +23,7 @@ class StructuredChatOutputParser(AgentOutputParser):
     format_instructions: str = FORMAT_INSTRUCTIONS
     """Default formatting instructions"""
 
-    pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
+    pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
langchain/chains/__init__.py
CHANGED
@@ -59,6 +59,7 @@ _module_lookup = {
     "OpenAIModerationChain": "langchain.chains.moderation",
     "NatBotChain": "langchain.chains.natbot.base",
     "create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
+    "create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
     "create_extraction_chain": "langchain.chains.openai_functions",
     "create_extraction_chain_pydantic": "langchain.chains.openai_functions",
     "create_qa_with_sources_chain": "langchain.chains.openai_functions",
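The one-line addition above registers the new `create_citation_fuzzy_match_runnable` symbol in the package's lazy-import table. As a simplified sketch of the surrounding mechanism (an assumption; the diff itself only shows the table entry), `langchain.chains.__init__` resolves these names on demand through a module-level `__getattr__` along these lines:

```python
import importlib
from typing import Any

# Trimmed-down lookup table; the real one maps many more names.
_module_lookup = {
    "create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
}


def __getattr__(name: str) -> Any:
    # PEP 562: invoked for attributes not found on the module, so the target
    # submodule is only imported the first time the symbol is accessed.
    if name in _module_lookup:
        module = importlib.import_module(_module_lookup[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```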
langchain/chains/api/base.py
CHANGED
@@ -5,6 +5,7 @@ from __future__ import annotations
 from typing import Any, Dict, List, Optional, Sequence, Tuple
 from urllib.parse import urlparse
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
@@ -53,6 +54,15 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
 try:
     from langchain_community.utilities.requests import TextRequestsWrapper
 
+    @deprecated(
+        since="0.2.13",
+        message=(
+            "This class is deprecated and will be removed in langchain 1.0. "
+            "See API reference for replacement: "
+            "https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html"  # noqa: E501
+        ),
+        removal="1.0",
+    )
     class APIChain(Chain):
         """Chain that makes API calls and summarizes the responses to answer a question.
 
@@ -69,7 +79,117 @@ try:
         what network access it has.
 
         See https://python.langchain.com/docs/security for more information.
-
+
+        Note: this class is deprecated. See below for a replacement implementation
+        using LangGraph. The benefits of this implementation are:
+
+        - Uses LLM tool calling features to encourage properly-formatted API requests;
+        - Support for both token-by-token and step-by-step streaming;
+        - Support for checkpointing and memory of chat history;
+        - Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
+
+        Install LangGraph with:
+
+        .. code-block:: bash
+
+            pip install -U langgraph
+
+        .. code-block:: python
+
+            from typing import Annotated, Sequence
+            from typing_extensions import TypedDict
+
+            from langchain.chains.api.prompt import API_URL_PROMPT
+            from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
+            from langchain_community.utilities.requests import TextRequestsWrapper
+            from langchain_core.messages import BaseMessage
+            from langchain_core.prompts import ChatPromptTemplate
+            from langchain_openai import ChatOpenAI
+            from langchain_core.runnables import RunnableConfig
+            from langgraph.graph import END, StateGraph
+            from langgraph.graph.message import add_messages
+            from langgraph.prebuilt.tool_node import ToolNode
+
+            # NOTE: There are inherent risks in giving models discretion
+            # to execute real-world actions. We must "opt-in" to these
+            # risks by setting allow_dangerous_request=True to use these tools.
+            # This can be dangerous for calling unwanted requests. Please make
+            # sure your custom OpenAPI spec (yaml) is safe and that permissions
+            # associated with the tools are narrowly-scoped.
+            ALLOW_DANGEROUS_REQUESTS = True
+
+            # Subset of spec for https://jsonplaceholder.typicode.com
+            api_spec = \"\"\"
+            openapi: 3.0.0
+            info:
+              title: JSONPlaceholder API
+              version: 1.0.0
+            servers:
+              - url: https://jsonplaceholder.typicode.com
+            paths:
+              /posts:
+                get:
+                  summary: Get posts
+                  parameters: &id001
+                    - name: _limit
+                      in: query
+                      required: false
+                      schema:
+                        type: integer
+                      example: 2
+                      description: Limit the number of results
+            \"\"\"
+
+            llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+            toolkit = RequestsToolkit(
+                requests_wrapper=TextRequestsWrapper(headers={}),  # no auth required
+                allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,
+            )
+            tools = toolkit.get_tools()
+
+            api_request_chain = (
+                API_URL_PROMPT.partial(api_docs=api_spec)
+                | llm.bind_tools(tools, tool_choice="any")
+            )
+
+            class ChainState(TypedDict):
+                \"\"\"LangGraph state.\"\"\"
+
+                messages: Annotated[Sequence[BaseMessage], add_messages]
+
+
+            async def acall_request_chain(state: ChainState, config: RunnableConfig):
+                last_message = state["messages"][-1]
+                response = await api_request_chain.ainvoke(
+                    {"question": last_message.content}, config
+                )
+                return {"messages": [response]}
+
+            async def acall_model(state: ChainState, config: RunnableConfig):
+                response = await llm.ainvoke(state["messages"], config)
+                return {"messages": [response]}
+
+            graph_builder = StateGraph(ChainState)
+            graph_builder.add_node("call_tool", acall_request_chain)
+            graph_builder.add_node("execute_tool", ToolNode(tools))
+            graph_builder.add_node("call_model", acall_model)
+            graph_builder.set_entry_point("call_tool")
+            graph_builder.add_edge("call_tool", "execute_tool")
+            graph_builder.add_edge("execute_tool", "call_model")
+            graph_builder.add_edge("call_model", END)
+            chain = graph_builder.compile()
+
+        .. code-block:: python
+
+            example_query = "Fetch the top two posts. What are their titles?"
+
+            events = chain.astream(
+                {"messages": [("user", example_query)]},
+                stream_mode="values",
+            )
+            async for event in events:
+                event["messages"][-1].pretty_print()
+        """  # noqa: E501
 
         api_request_chain: LLMChain
         api_answer_chain: LLMChain
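The new APIChain docstring above already builds and streams a LangGraph replacement (`chain = graph_builder.compile()`). As a small follow-on sketch, assuming that compiled `chain` and valid OpenAI credentials are in scope, the graph can also be awaited for just the final state rather than streamed:

```python
import asyncio


async def main() -> None:
    # `chain` is the graph compiled in the docstring example above; ainvoke
    # returns the final state dict instead of streaming intermediate values.
    final_state = await chain.ainvoke(
        {"messages": [("user", "Fetch the top two posts. What are their titles?")]}
    )
    print(final_state["messages"][-1].content)


asyncio.run(main())
```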
langchain/chains/base.py
CHANGED
@@ -97,8 +97,6 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
     """[DEPRECATED] Use `callbacks` instead."""
 
     class Config:
-        """Configuration for this pydantic object."""
-
        arbitrary_types_allowed = True
 
     def get_input_schema(
langchain/chains/combine_documents/map_reduce.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Tuple, Type
 
 from langchain_core.callbacks import Callbacks
 from langchain_core.documents import Document
-from langchain_core.pydantic_v1 import BaseModel,
+from langchain_core.pydantic_v1 import BaseModel, root_validator
 from langchain_core.runnables.config import RunnableConfig
 from langchain_core.runnables.utils import create_model
 
@@ -127,10 +127,8 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
         return _output_keys
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
        arbitrary_types_allowed = True
+        extra = "forbid"
 
     @root_validator(pre=True)
     def get_reduce_chain(cls, values: Dict) -> Dict:
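The Config change above (dropping the `Extra` import in favor of the string value) repeats in the map_rerank, reduce, and refine chains below. A minimal sketch of why the two spellings are equivalent in pydantic v1, using a made-up model name:

```python
# Assumes pydantic >= 2 is installed, where the v1 API lives under pydantic.v1;
# on pydantic 1.x the same classes come straight from `pydantic`.
from pydantic.v1 import BaseModel, ValidationError


class StrictThing(BaseModel):
    name: str

    class Config:
        arbitrary_types_allowed = True
        # pydantic v1 coerces this string to Extra.forbid internally, so the
        # behaviour matches the old `extra = Extra.forbid` without the import.
        extra = "forbid"


try:
    StrictThing(name="ok", unexpected_field=1)
except ValidationError as err:
    print(err)  # extra fields not permitted
```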
langchain/chains/combine_documents/map_rerank.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast
 
 from langchain_core.callbacks import Callbacks
 from langchain_core.documents import Document
-from langchain_core.pydantic_v1 import BaseModel,
+from langchain_core.pydantic_v1 import BaseModel, root_validator
 from langchain_core.runnables.config import RunnableConfig
 from langchain_core.runnables.utils import create_model
 
@@ -25,7 +25,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
     Example:
         .. code-block:: python
 
-            from langchain.chains import
+            from langchain.chains import MapRerankDocumentsChain, LLMChain
             from langchain_core.prompts import PromptTemplate
             from langchain_community.llms import OpenAI
             from langchain.output_parsers.regex import RegexParser
@@ -39,7 +39,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
             prompt_template = (
                 "Use the following context to tell me the chemical formula "
                 "for water. Output both your answer and a score of how confident "
-                "you are. Context: {
+                "you are. Context: {context}"
             )
             output_parser = RegexParser(
                 regex=r"(.*?)\nScore: (.*)",
@@ -75,10 +75,8 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
     Intermediate steps include the results of calling llm_chain on each document."""
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
        arbitrary_types_allowed = True
+        extra = "forbid"
 
     def get_output_schema(
         self, config: Optional[RunnableConfig] = None
langchain/chains/combine_documents/reduce.py
CHANGED
@@ -6,7 +6,6 @@ from typing import Any, Callable, List, Optional, Protocol, Tuple
 
 from langchain_core.callbacks import Callbacks
 from langchain_core.documents import Document
-from langchain_core.pydantic_v1 import Extra
 
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 
@@ -206,10 +205,8 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
     Otherwise, after it reaches the max number, it will throw an error"""
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
        arbitrary_types_allowed = True
+        extra = "forbid"
 
     @property
     def _collapse_chain(self) -> BaseCombineDocumentsChain:
langchain/chains/combine_documents/refine.py
CHANGED
@@ -8,7 +8,7 @@ from langchain_core.callbacks import Callbacks
 from langchain_core.documents import Document
 from langchain_core.prompts import BasePromptTemplate, format_document
 from langchain_core.prompts.prompt import PromptTemplate
-from langchain_core.pydantic_v1 import
+from langchain_core.pydantic_v1 import Field, root_validator
 
 from langchain.chains.combine_documents.base import (
     BaseCombineDocumentsChain,
@@ -99,10 +99,8 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
         return _output_keys
 
     class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
        arbitrary_types_allowed = True
+        extra = "forbid"
 
     @root_validator(pre=True)
     def get_return_intermediate_steps(cls, values: Dict) -> Dict: