langchain-0.3.22-py3-none-any.whl → langchain-0.3.24-py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the registry.
- langchain/_api/module_import.py +3 -3
- langchain/agents/agent.py +104 -109
- langchain/agents/agent_iterator.py +11 -15
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
- langchain/agents/chat/base.py +7 -6
- langchain/agents/chat/output_parser.py +2 -1
- langchain/agents/conversational/base.py +5 -4
- langchain/agents/conversational_chat/base.py +9 -8
- langchain/agents/format_scratchpad/log.py +1 -3
- langchain/agents/format_scratchpad/log_to_messages.py +3 -5
- langchain/agents/format_scratchpad/openai_functions.py +4 -4
- langchain/agents/format_scratchpad/tools.py +3 -3
- langchain/agents/format_scratchpad/xml.py +1 -3
- langchain/agents/initialize.py +2 -1
- langchain/agents/json_chat/base.py +3 -2
- langchain/agents/loading.py +5 -5
- langchain/agents/mrkl/base.py +6 -5
- langchain/agents/openai_assistant/base.py +13 -17
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
- langchain/agents/openai_functions_agent/base.py +13 -12
- langchain/agents/openai_functions_multi_agent/base.py +15 -14
- langchain/agents/openai_tools/base.py +2 -1
- langchain/agents/output_parsers/openai_functions.py +2 -2
- langchain/agents/output_parsers/openai_tools.py +6 -6
- langchain/agents/output_parsers/react_json_single_input.py +2 -1
- langchain/agents/output_parsers/self_ask.py +2 -1
- langchain/agents/output_parsers/tools.py +7 -7
- langchain/agents/react/agent.py +3 -2
- langchain/agents/react/base.py +4 -3
- langchain/agents/schema.py +3 -3
- langchain/agents/self_ask_with_search/base.py +2 -1
- langchain/agents/structured_chat/base.py +9 -8
- langchain/agents/structured_chat/output_parser.py +2 -1
- langchain/agents/tool_calling_agent/base.py +3 -2
- langchain/agents/tools.py +4 -4
- langchain/agents/types.py +3 -3
- langchain/agents/utils.py +1 -1
- langchain/agents/xml/base.py +7 -6
- langchain/callbacks/streaming_aiter.py +3 -2
- langchain/callbacks/streaming_aiter_final_only.py +3 -3
- langchain/callbacks/streaming_stdout_final_only.py +3 -3
- langchain/chains/api/base.py +11 -12
- langchain/chains/base.py +47 -50
- langchain/chains/combine_documents/base.py +23 -23
- langchain/chains/combine_documents/map_reduce.py +12 -12
- langchain/chains/combine_documents/map_rerank.py +16 -15
- langchain/chains/combine_documents/reduce.py +17 -17
- langchain/chains/combine_documents/refine.py +12 -12
- langchain/chains/combine_documents/stuff.py +10 -10
- langchain/chains/constitutional_ai/base.py +9 -9
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +30 -30
- langchain/chains/elasticsearch_database/base.py +13 -13
- langchain/chains/example_generator.py +1 -3
- langchain/chains/flare/base.py +13 -12
- langchain/chains/flare/prompts.py +2 -4
- langchain/chains/hyde/base.py +8 -8
- langchain/chains/llm.py +31 -30
- langchain/chains/llm_checker/base.py +6 -6
- langchain/chains/llm_math/base.py +10 -10
- langchain/chains/llm_summarization_checker/base.py +6 -6
- langchain/chains/loading.py +12 -14
- langchain/chains/mapreduce.py +7 -6
- langchain/chains/moderation.py +8 -8
- langchain/chains/natbot/base.py +6 -6
- langchain/chains/openai_functions/base.py +8 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
- langchain/chains/openai_functions/extraction.py +3 -3
- langchain/chains/openai_functions/openapi.py +12 -12
- langchain/chains/openai_functions/qa_with_structure.py +4 -4
- langchain/chains/openai_functions/utils.py +2 -2
- langchain/chains/openai_tools/extraction.py +2 -2
- langchain/chains/prompt_selector.py +3 -3
- langchain/chains/qa_generation/base.py +5 -5
- langchain/chains/qa_with_sources/base.py +21 -21
- langchain/chains/qa_with_sources/loading.py +2 -1
- langchain/chains/qa_with_sources/retrieval.py +6 -6
- langchain/chains/qa_with_sources/vector_db.py +8 -8
- langchain/chains/query_constructor/base.py +4 -3
- langchain/chains/query_constructor/parser.py +5 -4
- langchain/chains/question_answering/chain.py +3 -2
- langchain/chains/retrieval.py +2 -2
- langchain/chains/retrieval_qa/base.py +16 -16
- langchain/chains/router/base.py +12 -11
- langchain/chains/router/embedding_router.py +12 -11
- langchain/chains/router/llm_router.py +12 -12
- langchain/chains/router/multi_prompt.py +3 -3
- langchain/chains/router/multi_retrieval_qa.py +5 -4
- langchain/chains/sequential.py +18 -18
- langchain/chains/sql_database/query.py +4 -4
- langchain/chains/structured_output/base.py +14 -13
- langchain/chains/summarize/chain.py +4 -3
- langchain/chains/transform.py +12 -11
- langchain/chat_models/base.py +34 -31
- langchain/embeddings/__init__.py +1 -1
- langchain/embeddings/base.py +4 -4
- langchain/embeddings/cache.py +19 -18
- langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
- langchain/evaluation/comparison/eval_chain.py +10 -10
- langchain/evaluation/criteria/eval_chain.py +11 -10
- langchain/evaluation/embedding_distance/base.py +21 -21
- langchain/evaluation/exact_match/base.py +3 -3
- langchain/evaluation/loading.py +7 -8
- langchain/evaluation/qa/eval_chain.py +7 -6
- langchain/evaluation/regex_match/base.py +3 -3
- langchain/evaluation/schema.py +6 -5
- langchain/evaluation/scoring/eval_chain.py +9 -9
- langchain/evaluation/string_distance/base.py +23 -23
- langchain/hub.py +2 -1
- langchain/indexes/_sql_record_manager.py +8 -7
- langchain/indexes/vectorstore.py +11 -11
- langchain/llms/__init__.py +3 -3
- langchain/memory/buffer.py +13 -13
- langchain/memory/buffer_window.py +5 -5
- langchain/memory/chat_memory.py +5 -5
- langchain/memory/combined.py +10 -10
- langchain/memory/entity.py +8 -7
- langchain/memory/readonly.py +4 -4
- langchain/memory/simple.py +5 -5
- langchain/memory/summary.py +8 -8
- langchain/memory/summary_buffer.py +11 -11
- langchain/memory/token_buffer.py +5 -5
- langchain/memory/utils.py +2 -2
- langchain/memory/vectorstore.py +15 -14
- langchain/memory/vectorstore_token_buffer_memory.py +7 -7
- langchain/model_laboratory.py +4 -3
- langchain/output_parsers/combining.py +5 -5
- langchain/output_parsers/datetime.py +1 -2
- langchain/output_parsers/enum.py +4 -5
- langchain/output_parsers/pandas_dataframe.py +5 -5
- langchain/output_parsers/regex.py +4 -4
- langchain/output_parsers/regex_dict.py +4 -4
- langchain/output_parsers/retry.py +2 -2
- langchain/output_parsers/structured.py +5 -5
- langchain/output_parsers/yaml.py +3 -3
- langchain/pydantic_v1/__init__.py +1 -6
- langchain/pydantic_v1/dataclasses.py +1 -5
- langchain/pydantic_v1/main.py +1 -5
- langchain/retrievers/contextual_compression.py +3 -3
- langchain/retrievers/document_compressors/base.py +3 -2
- langchain/retrievers/document_compressors/chain_extract.py +4 -3
- langchain/retrievers/document_compressors/chain_filter.py +3 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
- langchain/retrievers/document_compressors/cross_encoder.py +1 -2
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
- langchain/retrievers/ensemble.py +15 -19
- langchain/retrievers/merger_retriever.py +7 -12
- langchain/retrievers/multi_query.py +14 -13
- langchain/retrievers/multi_vector.py +4 -4
- langchain/retrievers/parent_document_retriever.py +9 -8
- langchain/retrievers/re_phraser.py +2 -3
- langchain/retrievers/self_query/base.py +13 -12
- langchain/retrievers/time_weighted_retriever.py +14 -14
- langchain/runnables/openai_functions.py +4 -3
- langchain/smith/evaluation/config.py +7 -6
- langchain/smith/evaluation/progress.py +3 -2
- langchain/smith/evaluation/runner_utils.py +58 -61
- langchain/smith/evaluation/string_run_evaluator.py +29 -29
- langchain/storage/encoder_backed.py +7 -11
- langchain/storage/file_system.py +5 -4
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/METADATA +5 -3
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/RECORD +169 -169
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/WHEEL +1 -1
- langchain-0.3.24.dist-info/entry_points.txt +4 -0
- langchain-0.3.22.dist-info/entry_points.txt +0 -5
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/licenses/LICENSE +0 -0
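
Nearly every per-file hunk below applies the same mechanical change: container annotations from `typing` (`List`, `Dict`, `Tuple`, `Type`) are replaced with the PEP 585 built-in generics (`list`, `dict`, `tuple`, `type`), and `Sequence`/`Pattern` are now imported from `collections.abc`/`re` instead of `typing`. A minimal sketch of that pattern, using invented function names rather than code taken from langchain itself:

```python
# Illustrative sketch only -- the names below are made up to show the
# annotation change applied throughout this diff, not code from langchain.

# 0.3.22 style:
#   from typing import Dict, List, Optional, Sequence, Tuple
#   def plan(steps: List[Tuple[str, str]], stop: Optional[List[str]] = None) -> Dict[str, str]: ...

# 0.3.24 style:
from collections.abc import Sequence  # moved out of typing
from typing import Optional           # Optional/Union still come from typing


def plan(
    steps: list[tuple[str, str]],      # was List[Tuple[str, str]]
    stop: Optional[list[str]] = None,  # was Optional[List[str]]
) -> dict[str, str]:                   # was Dict[str, str]
    """Return the last (action, observation) pair as a dict."""
    action, observation = steps[-1] if steps else ("", "")
    return {"action": action, "observation": observation, "stop": ",".join(stop or [])}


def render(tools: Sequence[str]) -> str:
    """Sequence is unchanged in meaning; only its import location moved."""
    return ", ".join(tools)
```

Runtime behaviour is unchanged; built-in generics in annotations require Python 3.9+, which langchain 0.3.x already requires.
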
langchain/agents/openai_functions_agent/agent_token_buffer_memory.py
CHANGED

@@ -1,6 +1,6 @@
 """Memory used to save agent output AND intermediate steps."""
 
-from typing import Any, Dict, List
+from typing import Any
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import BaseMessage, get_buffer_string
@@ -12,7 +12,7 @@ from langchain.agents.format_scratchpad import (
 from langchain.memory.chat_memory import BaseChatMemory
 
 
-class AgentTokenBufferMemory(BaseChatMemory):  # type: ignore[override]
+class AgentTokenBufferMemory(BaseChatMemory):
     """Memory used to save agent output AND intermediate steps.
 
     Parameters:
@@ -43,19 +43,19 @@ class AgentTokenBufferMemory(BaseChatMemory):  # type: ignore[override]
     format_as_tools: bool = False
 
     @property
-    def buffer(self) -> List[BaseMessage]:
+    def buffer(self) -> list[BaseMessage]:
         """String buffer of memory."""
         return self.chat_memory.messages
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer.
 
         Args:
@@ -74,7 +74,7 @@ class AgentTokenBufferMemory(BaseChatMemory):  # type: ignore[override]
         )
         return {self.memory_key: final_buffer}
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None:
         """Save context from this conversation to buffer. Pruned.
 
         Args:

langchain/agents/openai_functions_agent/base.py
CHANGED

@@ -1,6 +1,7 @@
 """Module implements an agent that uses OpenAI's APIs function enabled API."""
 
-from typing import Any, List, Optional, Sequence, Tuple, Type, Union
+from collections.abc import Sequence
+from typing import Any, Optional, Union
 
 from langchain_core._api import deprecated
 from langchain_core.agents import AgentAction, AgentFinish
@@ -51,11 +52,11 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
     llm: BaseLanguageModel
     tools: Sequence[BaseTool]
     prompt: BasePromptTemplate
-    output_parser: Type[OpenAIFunctionsAgentOutputParser] = (
+    output_parser: type[OpenAIFunctionsAgentOutputParser] = (
         OpenAIFunctionsAgentOutputParser
     )
 
-    def get_allowed_tools(self) -> List[str]:
+    def get_allowed_tools(self) -> list[str]:
         """Get allowed tools."""
         return [t.name for t in self.tools]
 
@@ -81,19 +82,19 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         return self
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Get input keys. Input refers to user input here."""
         return ["input"]
 
     @property
-    def functions(self) -> List[dict]:
+    def functions(self) -> list[dict]:
         """Get functions."""
 
         return [dict(convert_to_openai_function(t)) for t in self.tools]
 
     def plan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         with_functions: bool = True,
         **kwargs: Any,
@@ -135,7 +136,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
 
     async def aplan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> Union[AgentAction, AgentFinish]:
@@ -168,7 +169,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
     def return_stopped_response(
         self,
         early_stopping_method: str,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         **kwargs: Any,
     ) -> AgentFinish:
         """Return response when agent has been stopped due to max iterations.
@@ -213,7 +214,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         system_message: Optional[SystemMessage] = SystemMessage(
             content="You are a helpful AI assistant."
         ),
-        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
+        extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
     ) -> ChatPromptTemplate:
         """Create prompt for this agent.
 
@@ -227,7 +228,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
            A prompt template to pass into this agent.
         """
         _prompts = extra_prompt_messages or []
-        messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
+        messages: list[Union[BaseMessagePromptTemplate, BaseMessage]]
         if system_message:
             messages = [system_message]
         else:
@@ -240,7 +241,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)
+        return ChatPromptTemplate(messages=messages)
 
     @classmethod
     def from_llm_and_tools(
@@ -248,7 +249,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         llm: BaseLanguageModel,
         tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
-        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
+        extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
         system_message: Optional[SystemMessage] = SystemMessage(
             content="You are a helpful AI assistant."
         ),

langchain/agents/openai_functions_multi_agent/base.py
CHANGED

@@ -1,8 +1,9 @@
 """Module implements an agent that uses OpenAI's APIs function enabled API."""
 
 import json
+from collections.abc import Sequence
 from json import JSONDecodeError
-from typing import Any, List, Optional, Sequence, Tuple, Union
+from typing import Any, Optional, Union
 
 from langchain_core._api import deprecated
 from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
@@ -34,7 +35,7 @@ from langchain.agents.format_scratchpad.openai_functions import (
 _FunctionsAgentAction = AgentActionMessageLog
 
 
-def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFinish]:
+def _parse_ai_message(message: BaseMessage) -> Union[list[AgentAction], AgentFinish]:
     """Parse an AI message."""
     if not isinstance(message, AIMessage):
         raise TypeError(f"Expected an AI message got {type(message)}")
@@ -58,7 +59,7 @@ def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFin
                 f"the `arguments` JSON does not contain `actions` key."
             )
 
-    final_tools: List[AgentAction] = []
+    final_tools: list[AgentAction] = []
     for tool_schema in tools:
         if "action" in tool_schema:
             _tool_input = tool_schema["action"]
@@ -112,7 +113,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
     tools: Sequence[BaseTool]
     prompt: BasePromptTemplate
 
-    def get_allowed_tools(self) -> List[str]:
+    def get_allowed_tools(self) -> list[str]:
         """Get allowed tools."""
         return [t.name for t in self.tools]
 
@@ -127,12 +128,12 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         return self
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Get input keys. Input refers to user input here."""
         return ["input"]
 
     @property
-    def functions(self) -> List[dict]:
+    def functions(self) -> list[dict]:
         """Get the functions for the agent."""
         enum_vals = [t.name for t in self.tools]
         tool_selection = {
@@ -194,10 +195,10 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
 
     def plan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Union[List[AgentAction], AgentFinish]:
+    ) -> Union[list[AgentAction], AgentFinish]:
         """Given input, decided what to do.
 
         Args:
@@ -224,10 +225,10 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
 
     async def aplan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Union[List[AgentAction], AgentFinish]:
+    ) -> Union[list[AgentAction], AgentFinish]:
         """Async given input, decided what to do.
 
         Args:
@@ -258,7 +259,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         system_message: Optional[SystemMessage] = SystemMessage(
             content="You are a helpful AI assistant."
         ),
-        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
+        extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
     ) -> BasePromptTemplate:
         """Create prompt for this agent.
 
@@ -272,7 +273,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
             A prompt template to pass into this agent.
         """
         _prompts = extra_prompt_messages or []
-        messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
+        messages: list[Union[BaseMessagePromptTemplate, BaseMessage]]
         if system_message:
             messages = [system_message]
         else:
@@ -285,7 +286,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
                 MessagesPlaceholder(variable_name="agent_scratchpad"),
             ]
         )
-        return ChatPromptTemplate(messages=messages)
+        return ChatPromptTemplate(messages=messages)
 
     @classmethod
     def from_llm_and_tools(
@@ -293,7 +294,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         llm: BaseLanguageModel,
         tools: Sequence[BaseTool],
         callback_manager: Optional[BaseCallbackManager] = None,
-        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
+        extra_prompt_messages: Optional[list[BaseMessagePromptTemplate]] = None,
         system_message: Optional[SystemMessage] = SystemMessage(
             content="You are a helpful AI assistant."
         ),

langchain/agents/output_parsers/openai_functions.py
CHANGED

@@ -1,6 +1,6 @@
 import json
 from json import JSONDecodeError
-from typing import List, Union
+from typing import Union
 
 from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -77,7 +77,7 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
         )
 
     def parse_result(
-        self, result: List[Generation], *, partial: bool = False
+        self, result: list[Generation], *, partial: bool = False
     ) -> Union[AgentAction, AgentFinish]:
         if not isinstance(result[0], ChatGeneration):
             raise ValueError("This output parser only works on ChatGeneration output")

langchain/agents/output_parsers/openai_tools.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import List, Union
+from typing import Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.messages import BaseMessage
@@ -15,12 +15,12 @@ OpenAIToolAgentAction = ToolAgentAction
 
 def parse_ai_message_to_openai_tool_action(
     message: BaseMessage,
-) -> Union[List[AgentAction], AgentFinish]:
+) -> Union[list[AgentAction], AgentFinish]:
     """Parse an AI message potentially containing tool_calls."""
     tool_actions = parse_ai_message_to_tool_action(message)
     if isinstance(tool_actions, AgentFinish):
         return tool_actions
-    final_actions: List[AgentAction] = []
+    final_actions: list[AgentAction] = []
     for action in tool_actions:
         if isinstance(action, ToolAgentAction):
             final_actions.append(
@@ -54,12 +54,12 @@ class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
         return "openai-tools-agent-output-parser"
 
     def parse_result(
-        self, result: List[Generation], *, partial: bool = False
-    ) -> Union[List[AgentAction], AgentFinish]:
+        self, result: list[Generation], *, partial: bool = False
+    ) -> Union[list[AgentAction], AgentFinish]:
         if not isinstance(result[0], ChatGeneration):
             raise ValueError("This output parser only works on ChatGeneration output")
         message = result[0].message
         return parse_ai_message_to_openai_tool_action(message)
 
-    def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
+    def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
         raise ValueError("Can only parse messages")

langchain/agents/output_parsers/tools.py
CHANGED

@@ -1,6 +1,6 @@
 import json
 from json import JSONDecodeError
-from typing import List, Union
+from typing import Union
 
 from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -14,19 +14,19 @@ from langchain_core.outputs import ChatGeneration, Generation
 from langchain.agents.agent import MultiActionAgentOutputParser
 
 
-class ToolAgentAction(AgentActionMessageLog):
+class ToolAgentAction(AgentActionMessageLog):
     tool_call_id: str
     """Tool call that this message is responding to."""
 
 
 def parse_ai_message_to_tool_action(
     message: BaseMessage,
-) -> Union[List[AgentAction], AgentFinish]:
+) -> Union[list[AgentAction], AgentFinish]:
     """Parse an AI message potentially containing tool_calls."""
     if not isinstance(message, AIMessage):
         raise TypeError(f"Expected an AI message got {type(message)}")
 
-    actions: List = []
+    actions: list = []
     if message.tool_calls:
         tool_calls = message.tool_calls
     else:
@@ -91,12 +91,12 @@ class ToolsAgentOutputParser(MultiActionAgentOutputParser):
         return "tools-agent-output-parser"
 
     def parse_result(
-        self, result: List[Generation], *, partial: bool = False
-    ) -> Union[List[AgentAction], AgentFinish]:
+        self, result: list[Generation], *, partial: bool = False
+    ) -> Union[list[AgentAction], AgentFinish]:
         if not isinstance(result[0], ChatGeneration):
             raise ValueError("This output parser only works on ChatGeneration output")
         message = result[0].message
         return parse_ai_message_to_tool_action(message)
 
-    def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
+    def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
         raise ValueError("Can only parse messages")

langchain/agents/react/agent.py
CHANGED

@@ -1,6 +1,7 @@
 from __future__ import annotations
 
-from typing import List, Optional, Sequence, Union
+from collections.abc import Sequence
+from typing import Optional, Union
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
@@ -20,7 +21,7 @@ def create_react_agent(
     output_parser: Optional[AgentOutputParser] = None,
     tools_renderer: ToolsRenderer = render_text_description,
     *,
-    stop_sequence: Union[bool, List[str]] = True,
+    stop_sequence: Union[bool, list[str]] = True,
 ) -> Runnable:
     """Create an agent that uses ReAct prompting.
 

langchain/agents/react/base.py
CHANGED

@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, List, Optional, Sequence
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.documents import Document
@@ -65,7 +66,7 @@ class ReActDocstoreAgent(Agent):
         return "Observation: "
 
     @property
-    def _stop(self) -> List[str]:
+    def _stop(self) -> list[str]:
         return ["\nObservation:"]
 
     @property
@@ -122,7 +123,7 @@ class DocstoreExplorer:
         return self._paragraphs[0]
 
     @property
-    def _paragraphs(self) -> List[str]:
+    def _paragraphs(self) -> list[str]:
         if self.document is None:
             raise ValueError("Cannot get paragraphs without a document")
         return self.document.page_content.split("\n\n")

langchain/agents/schema.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Tuple
+from typing import Any
 
 from langchain_core.agents import AgentAction
 from langchain_core.prompts.chat import ChatPromptTemplate
@@ -12,7 +12,7 @@ class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
         return False
 
     def _construct_agent_scratchpad(
-        self, intermediate_steps: List[Tuple[AgentAction, str]]
+        self, intermediate_steps: list[tuple[AgentAction, str]]
     ) -> str:
         if len(intermediate_steps) == 0:
             return ""
@@ -26,7 +26,7 @@ class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
             f"you return as final answer):\n{thoughts}"
         )
 
-    def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
+    def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
         intermediate_steps = kwargs.pop("intermediate_steps")
         kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
             intermediate_steps

langchain/agents/self_ask_with_search/base.py
CHANGED

@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Sequence, Union
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, Union
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel

langchain/agents/structured_chat/base.py
CHANGED

@@ -1,5 +1,6 @@
 import re
-from typing import Any, List, Optional, Sequence, Tuple, Union
+from collections.abc import Sequence
+from typing import Any, Optional, Union
 
 from langchain_core._api import deprecated
 from langchain_core.agents import AgentAction
@@ -49,7 +50,7 @@ class StructuredChatAgent(Agent):
         return "Thought:"
 
     def _construct_scratchpad(
-        self, intermediate_steps: List[Tuple[AgentAction, str]]
+        self, intermediate_steps: list[tuple[AgentAction, str]]
     ) -> str:
         agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
         if not isinstance(agent_scratchpad, str):
@@ -74,7 +75,7 @@ class StructuredChatAgent(Agent):
         return StructuredChatOutputParserWithRetries.from_llm(llm=llm)
 
     @property
-    def _stop(self) -> List[str]:
+    def _stop(self) -> list[str]:
         return ["Observation:"]
 
     @classmethod
@@ -85,8 +86,8 @@ class StructuredChatAgent(Agent):
         suffix: str = SUFFIX,
         human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
         format_instructions: str = FORMAT_INSTRUCTIONS,
-        input_variables: Optional[List[str]] = None,
-        memory_prompts: Optional[List[BasePromptTemplate]] = None,
+        input_variables: Optional[list[str]] = None,
+        memory_prompts: Optional[list[BasePromptTemplate]] = None,
     ) -> BasePromptTemplate:
         tool_strings = []
         for tool in tools:
@@ -117,8 +118,8 @@ class StructuredChatAgent(Agent):
         suffix: str = SUFFIX,
         human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
         format_instructions: str = FORMAT_INSTRUCTIONS,
-        input_variables: Optional[List[str]] = None,
-        memory_prompts: Optional[List[BasePromptTemplate]] = None,
+        input_variables: Optional[list[str]] = None,
+        memory_prompts: Optional[list[BasePromptTemplate]] = None,
         **kwargs: Any,
     ) -> Agent:
         """Construct an agent from an LLM and tools."""
@@ -157,7 +158,7 @@ def create_structured_chat_agent(
     prompt: ChatPromptTemplate,
     tools_renderer: ToolsRenderer = render_text_description_and_args,
     *,
-    stop_sequence: Union[bool, List[str]] = True,
+    stop_sequence: Union[bool, list[str]] = True,
 ) -> Runnable:
     """Create an agent aimed at supporting tools with multiple inputs.
 

langchain/agents/structured_chat/output_parser.py
CHANGED

@@ -3,7 +3,8 @@ from __future__ import annotations
 import json
 import logging
 import re
-from typing import Optional, Pattern, Union
+from re import Pattern
+from typing import Optional, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException

langchain/agents/tool_calling_agent/base.py
CHANGED

@@ -1,4 +1,5 @@
-from typing import Callable, List, Sequence, Tuple
+from collections.abc import Sequence
+from typing import Callable
 
 from langchain_core.agents import AgentAction
 from langchain_core.language_models import BaseLanguageModel
@@ -12,7 +13,7 @@ from langchain.agents.format_scratchpad.tools import (
 )
 from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
 
-MessageFormatter = Callable[[Sequence[Tuple[AgentAction, str]]], List[BaseMessage]]
+MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
 
 
 def create_tool_calling_agent(
langchain/agents/tools.py
CHANGED

@@ -1,6 +1,6 @@
 """Interface for tools."""
 
-from typing import List, Optional
+from typing import Optional
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForToolRun,
@@ -9,7 +9,7 @@ from langchain_core.callbacks import (
 from langchain_core.tools import BaseTool, tool
 
 
-class InvalidTool(BaseTool):  # type: ignore[override]
+class InvalidTool(BaseTool):
     """Tool that is run when invalid tool name is encountered by agent."""
 
     name: str = "invalid_tool"
@@ -20,7 +20,7 @@ class InvalidTool(BaseTool):  # type: ignore[override]
     def _run(
         self,
         requested_tool_name: str,
-        available_tool_names: List[str],
+        available_tool_names: list[str],
         run_manager: Optional[CallbackManagerForToolRun] = None,
     ) -> str:
         """Use the tool."""
@@ -33,7 +33,7 @@ class InvalidTool(BaseTool):  # type: ignore[override]
     async def _arun(
         self,
         requested_tool_name: str,
-        available_tool_names: List[str],
+        available_tool_names: list[str],
         run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
     ) -> str:
         """Use the tool asynchronously."""

langchain/agents/types.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Dict, Type, Union
+from typing import Union
 
 from langchain.agents.agent import BaseSingleActionAgent
 from langchain.agents.agent_types import AgentType
@@ -12,9 +12,9 @@ from langchain.agents.react.base import ReActDocstoreAgent
 from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
 from langchain.agents.structured_chat.base import StructuredChatAgent
 
-AGENT_TYPE = Union[Type[BaseSingleActionAgent], Type[OpenAIMultiFunctionsAgent]]
+AGENT_TYPE = Union[type[BaseSingleActionAgent], type[OpenAIMultiFunctionsAgent]]
 
-AGENT_TO_CLASS: Dict[AgentType, AGENT_TYPE] = {
+AGENT_TO_CLASS: dict[AgentType, AGENT_TYPE] = {
     AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
     AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
     AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,

langchain/agents/utils.py
CHANGED
langchain/agents/xml/base.py
CHANGED

@@ -1,4 +1,5 @@
-from typing import Any, List, Sequence, Tuple, Union
+from collections.abc import Sequence
+from typing import Any, Union
 
 from langchain_core._api import deprecated
 from langchain_core.agents import AgentAction, AgentFinish
@@ -38,13 +39,13 @@ class XMLAgent(BaseSingleActionAgent):
 
     """
 
-    tools: List[BaseTool]
+    tools: list[BaseTool]
     """List of tools this agent has access to."""
     llm_chain: LLMChain
     """Chain to use to predict action."""
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         return ["input"]
 
     @staticmethod
@@ -60,7 +61,7 @@ class XMLAgent(BaseSingleActionAgent):
 
     def plan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> Union[AgentAction, AgentFinish]:
@@ -84,7 +85,7 @@ class XMLAgent(BaseSingleActionAgent):
 
     async def aplan(
         self,
-        intermediate_steps: List[Tuple[AgentAction, str]],
+        intermediate_steps: list[tuple[AgentAction, str]],
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> Union[AgentAction, AgentFinish]:
@@ -113,7 +114,7 @@ def create_xml_agent(
     prompt: BasePromptTemplate,
     tools_renderer: ToolsRenderer = render_text_description,
     *,
-    stop_sequence: Union[bool, List[str]] = True,
+    stop_sequence: Union[bool, list[str]] = True,
 ) -> Runnable:
     """Create an agent that uses XML to format its logic.
 