langchain 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +1 -0
- langchain/_api/module_import.py +2 -2
- langchain/agents/__init__.py +5 -4
- langchain/agents/agent.py +272 -50
- langchain/agents/agent_iterator.py +20 -0
- langchain/agents/agent_toolkits/__init__.py +1 -0
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
- langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
- langchain/agents/agent_types.py +1 -0
- langchain/agents/chat/base.py +37 -1
- langchain/agents/chat/output_parser.py +14 -0
- langchain/agents/conversational/base.py +38 -6
- langchain/agents/conversational/output_parser.py +10 -0
- langchain/agents/conversational_chat/base.py +42 -3
- langchain/agents/format_scratchpad/__init__.py +1 -0
- langchain/agents/format_scratchpad/log.py +12 -1
- langchain/agents/format_scratchpad/log_to_messages.py +10 -1
- langchain/agents/format_scratchpad/openai_functions.py +10 -5
- langchain/agents/format_scratchpad/tools.py +11 -7
- langchain/agents/initialize.py +15 -7
- langchain/agents/json_chat/base.py +9 -3
- langchain/agents/loading.py +7 -0
- langchain/agents/mrkl/base.py +39 -10
- langchain/agents/mrkl/output_parser.py +12 -0
- langchain/agents/openai_assistant/base.py +37 -14
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
- langchain/agents/openai_functions_agent/base.py +61 -10
- langchain/agents/openai_functions_multi_agent/base.py +22 -7
- langchain/agents/openai_tools/base.py +3 -0
- langchain/agents/output_parsers/__init__.py +1 -0
- langchain/agents/react/base.py +1 -0
- langchain/agents/self_ask_with_search/base.py +1 -0
- langchain/agents/structured_chat/output_parser.py +3 -3
- langchain/agents/tool_calling_agent/base.py +13 -3
- langchain/agents/tools.py +3 -0
- langchain/agents/utils.py +9 -1
- langchain/base_language.py +1 -0
- langchain/callbacks/__init__.py +1 -0
- langchain/callbacks/base.py +1 -0
- langchain/callbacks/streaming_stdout.py +1 -0
- langchain/callbacks/streaming_stdout_final_only.py +1 -0
- langchain/callbacks/tracers/evaluation.py +1 -0
- langchain/chains/api/base.py +5 -2
- langchain/chains/base.py +1 -1
- langchain/chains/combine_documents/base.py +59 -0
- langchain/chains/combine_documents/map_reduce.py +4 -2
- langchain/chains/combine_documents/map_rerank.py +5 -3
- langchain/chains/combine_documents/refine.py +4 -2
- langchain/chains/combine_documents/stuff.py +1 -0
- langchain/chains/constitutional_ai/base.py +1 -0
- langchain/chains/constitutional_ai/models.py +1 -0
- langchain/chains/constitutional_ai/principles.py +1 -0
- langchain/chains/conversation/base.py +81 -1
- langchain/chains/conversational_retrieval/base.py +2 -1
- langchain/chains/elasticsearch_database/base.py +2 -1
- langchain/chains/hyde/base.py +1 -0
- langchain/chains/llm.py +4 -2
- langchain/chains/llm_checker/base.py +4 -3
- langchain/chains/llm_math/base.py +1 -0
- langchain/chains/loading.py +2 -1
- langchain/chains/mapreduce.py +1 -0
- langchain/chains/moderation.py +1 -1
- langchain/chains/natbot/base.py +1 -0
- langchain/chains/openai_functions/base.py +1 -0
- langchain/chains/openai_functions/extraction.py +6 -6
- langchain/chains/openai_tools/extraction.py +3 -3
- langchain/chains/qa_generation/base.py +47 -1
- langchain/chains/qa_with_sources/__init__.py +1 -0
- langchain/chains/qa_with_sources/loading.py +1 -0
- langchain/chains/qa_with_sources/vector_db.py +1 -1
- langchain/chains/query_constructor/base.py +1 -0
- langchain/chains/query_constructor/ir.py +1 -0
- langchain/chains/question_answering/chain.py +1 -0
- langchain/chains/retrieval_qa/base.py +3 -2
- langchain/chains/router/base.py +1 -0
- langchain/chains/router/llm_router.py +2 -1
- langchain/chains/router/multi_prompt.py +1 -0
- langchain/chains/router/multi_retrieval_qa.py +1 -0
- langchain/chains/sequential.py +2 -1
- langchain/chains/structured_output/base.py +12 -12
- langchain/chains/summarize/chain.py +1 -0
- langchain/chains/transform.py +4 -3
- langchain/chat_models/__init__.py +1 -0
- langchain/chat_models/base.py +2 -2
- langchain/docstore/__init__.py +1 -0
- langchain/document_loaders/__init__.py +1 -0
- langchain/document_transformers/__init__.py +1 -0
- langchain/embeddings/__init__.py +0 -1
- langchain/evaluation/__init__.py +2 -1
- langchain/evaluation/agents/__init__.py +1 -0
- langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
- langchain/evaluation/comparison/__init__.py +1 -0
- langchain/evaluation/comparison/eval_chain.py +1 -0
- langchain/evaluation/comparison/prompt.py +1 -0
- langchain/evaluation/embedding_distance/__init__.py +1 -0
- langchain/evaluation/embedding_distance/base.py +1 -0
- langchain/evaluation/loading.py +1 -0
- langchain/evaluation/parsing/base.py +1 -0
- langchain/evaluation/qa/__init__.py +1 -0
- langchain/evaluation/qa/eval_chain.py +1 -0
- langchain/evaluation/qa/generate_chain.py +1 -0
- langchain/evaluation/schema.py +1 -0
- langchain/evaluation/scoring/__init__.py +1 -0
- langchain/evaluation/scoring/eval_chain.py +1 -0
- langchain/evaluation/scoring/prompt.py +1 -0
- langchain/evaluation/string_distance/__init__.py +1 -0
- langchain/example_generator.py +1 -0
- langchain/formatting.py +1 -0
- langchain/globals/__init__.py +1 -0
- langchain/graphs/__init__.py +1 -0
- langchain/indexes/__init__.py +1 -0
- langchain/indexes/_sql_record_manager.py +9 -5
- langchain/indexes/graph.py +1 -0
- langchain/indexes/prompts/__init__.py +1 -0
- langchain/input.py +1 -0
- langchain/llms/__init__.py +1 -0
- langchain/load/__init__.py +1 -0
- langchain/memory/__init__.py +5 -0
- langchain/memory/vectorstore_token_buffer_memory.py +184 -0
- langchain/output_parsers/__init__.py +1 -0
- langchain/output_parsers/combining.py +1 -1
- langchain/output_parsers/enum.py +7 -3
- langchain/output_parsers/fix.py +57 -16
- langchain/output_parsers/pandas_dataframe.py +1 -1
- langchain/output_parsers/regex.py +1 -1
- langchain/output_parsers/regex_dict.py +1 -1
- langchain/output_parsers/retry.py +76 -29
- langchain/output_parsers/structured.py +3 -3
- langchain/output_parsers/yaml.py +4 -0
- langchain/prompts/__init__.py +1 -0
- langchain/prompts/example_selector/__init__.py +1 -0
- langchain/python.py +1 -0
- langchain/requests.py +1 -0
- langchain/retrievers/__init__.py +1 -0
- langchain/retrievers/document_compressors/chain_extract.py +1 -0
- langchain/retrievers/document_compressors/chain_filter.py +1 -0
- langchain/retrievers/ensemble.py +18 -3
- langchain/retrievers/multi_query.py +2 -1
- langchain/retrievers/re_phraser.py +2 -1
- langchain/retrievers/self_query/base.py +9 -8
- langchain/schema/__init__.py +1 -0
- langchain/schema/runnable/__init__.py +1 -0
- langchain/serpapi.py +1 -0
- langchain/smith/__init__.py +6 -5
- langchain/smith/evaluation/__init__.py +0 -1
- langchain/smith/evaluation/string_run_evaluator.py +1 -0
- langchain/sql_database.py +1 -0
- langchain/storage/__init__.py +1 -0
- langchain/storage/_lc_store.py +1 -0
- langchain/storage/in_memory.py +1 -0
- langchain/text_splitter.py +1 -0
- langchain/tools/__init__.py +1 -0
- langchain/tools/amadeus/__init__.py +1 -0
- langchain/tools/azure_cognitive_services/__init__.py +1 -0
- langchain/tools/bing_search/__init__.py +1 -0
- langchain/tools/dataforseo_api_search/__init__.py +1 -0
- langchain/tools/ddg_search/__init__.py +1 -0
- langchain/tools/edenai/__init__.py +1 -0
- langchain/tools/eleven_labs/__init__.py +1 -0
- langchain/tools/file_management/__init__.py +1 -0
- langchain/tools/github/__init__.py +1 -1
- langchain/tools/gitlab/__init__.py +1 -1
- langchain/tools/gmail/__init__.py +1 -0
- langchain/tools/golden_query/__init__.py +1 -0
- langchain/tools/google_cloud/__init__.py +1 -0
- langchain/tools/google_finance/__init__.py +1 -0
- langchain/tools/google_jobs/__init__.py +1 -0
- langchain/tools/google_lens/__init__.py +1 -0
- langchain/tools/google_places/__init__.py +1 -0
- langchain/tools/google_scholar/__init__.py +1 -0
- langchain/tools/google_search/__init__.py +1 -0
- langchain/tools/google_trends/__init__.py +1 -0
- langchain/tools/human/__init__.py +1 -0
- langchain/tools/memorize/__init__.py +1 -0
- langchain/tools/metaphor_search/__init__.py +1 -0
- langchain/tools/multion/__init__.py +1 -0
- langchain/tools/office365/__init__.py +1 -0
- langchain/tools/openapi/utils/openapi_utils.py +1 -0
- langchain/tools/openweathermap/__init__.py +1 -0
- langchain/tools/playwright/__init__.py +1 -0
- langchain/tools/shell/__init__.py +1 -0
- langchain/tools/slack/__init__.py +1 -0
- langchain/tools/sql_database/prompt.py +1 -0
- langchain/tools/steamship_image_generation/__init__.py +1 -0
- langchain/tools/tavily_search/__init__.py +1 -0
- langchain/tools/wolfram_alpha/__init__.py +1 -0
- langchain/tools/zapier/__init__.py +1 -0
- langchain/utilities/__init__.py +1 -0
- langchain/utilities/python.py +1 -0
- langchain/vectorstores/__init__.py +1 -0
- {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/METADATA +3 -4
- {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/RECORD +197 -196
- {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/LICENSE +0 -0
- {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/WHEEL +0 -0
- {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/entry_points.txt +0 -0
langchain/agents/agent_iterator.py
CHANGED

@@ -62,6 +62,22 @@ class AgentExecutorIterator:
         """
         Initialize the AgentExecutorIterator with the given AgentExecutor,
         inputs, and optional callbacks.
+
+        Args:
+            agent_executor (AgentExecutor): The AgentExecutor to iterate over.
+            inputs (Any): The inputs to the AgentExecutor.
+            callbacks (Callbacks, optional): The callbacks to use during iteration.
+                Defaults to None.
+            tags (Optional[list[str]], optional): The tags to use during iteration.
+                Defaults to None.
+            metadata (Optional[Dict[str, Any]], optional): The metadata to use
+                during iteration. Defaults to None.
+            run_name (Optional[str], optional): The name of the run. Defaults to None.
+            run_id (Optional[UUID], optional): The ID of the run. Defaults to None.
+            include_run_info (bool, optional): Whether to include run info
+                in the output. Defaults to False.
+            yield_actions (bool, optional): Whether to yield actions as they
+                are generated. Defaults to False.
         """
         self._agent_executor = agent_executor
         self.inputs = inputs
@@ -85,6 +101,7 @@ class AgentExecutorIterator:
 
     @property
     def inputs(self) -> Dict[str, str]:
+        """The inputs to the AgentExecutor."""
         return self._inputs
 
     @inputs.setter
@@ -93,6 +110,7 @@ class AgentExecutorIterator:
 
     @property
     def agent_executor(self) -> AgentExecutor:
+        """The AgentExecutor to iterate over."""
         return self._agent_executor
 
     @agent_executor.setter
@@ -103,10 +121,12 @@ class AgentExecutorIterator:
 
     @property
    def name_to_tool_map(self) -> Dict[str, BaseTool]:
+        """A mapping of tool names to tools."""
         return {tool.name: tool for tool in self.agent_executor.tools}
 
     @property
     def color_mapping(self) -> Dict[str, str]:
+        """A mapping of tool names to colors."""
         return get_color_mapping(
             [tool.name for tool in self.agent_executor.tools],
             excluded_colors=["green", "red"],
langchain/agents/agent_types.py
CHANGED
langchain/agents/chat/base.py
CHANGED

@@ -80,6 +80,23 @@ class ChatAgent(Agent):
         format_instructions: str = FORMAT_INSTRUCTIONS,
         input_variables: Optional[List[str]] = None,
     ) -> BasePromptTemplate:
+        """Create a prompt from a list of tools.
+
+        Args:
+            tools: A list of tools.
+            system_message_prefix: The system message prefix.
+                Default is SYSTEM_MESSAGE_PREFIX.
+            system_message_suffix: The system message suffix.
+                Default is SYSTEM_MESSAGE_SUFFIX.
+            human_message: The human message. Default is HUMAN_MESSAGE.
+            format_instructions: The format instructions.
+                Default is FORMAT_INSTRUCTIONS.
+            input_variables: The input variables. Default is None.
+
+        Returns:
+            A prompt template.
+        """
+
         tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
         tool_names = ", ".join([tool.name for tool in tools])
         format_instructions = format_instructions.format(tool_names=tool_names)
@@ -113,7 +130,26 @@ class ChatAgent(Agent):
         input_variables: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Agent:
-        """Construct an agent from an LLM and tools.
+        """Construct an agent from an LLM and tools.
+
+        Args:
+            llm: The language model.
+            tools: A list of tools.
+            callback_manager: The callback manager. Default is None.
+            output_parser: The output parser. Default is None.
+            system_message_prefix: The system message prefix.
+                Default is SYSTEM_MESSAGE_PREFIX.
+            system_message_suffix: The system message suffix.
+                Default is SYSTEM_MESSAGE_SUFFIX.
+            human_message: The human message. Default is HUMAN_MESSAGE.
+            format_instructions: The format instructions.
+                Default is FORMAT_INSTRUCTIONS.
+            input_variables: The input variables. Default is None.
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            An agent.
+        """
         cls._validate_tools(tools)
         prompt = cls.create_prompt(
             tools,
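
ChatAgent.create_prompt and from_llm_and_tools now carry full Args sections. A minimal sketch of the documented call (not part of the diff); llm and tools are assumed to exist (any chat model plus a list of BaseTool instances):

from langchain.agents import AgentExecutor
from langchain.agents.chat.base import ChatAgent

agent = ChatAgent.from_llm_and_tools(
    llm=llm,      # assumed chat model
    tools=tools,  # assumed list of tools
)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "..."})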
langchain/agents/chat/output_parser.py
CHANGED

@@ -25,6 +25,20 @@ class ChatOutputParser(AgentOutputParser):
         return self.format_instructions
 
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+        """Parse the output from the agent into
+        an AgentAction or AgentFinish object.
+
+        Args:
+            text: The text to parse.
+
+        Returns:
+            An AgentAction or AgentFinish object.
+
+        Raises:
+            OutputParserException: If the output could not be parsed.
+            ValueError: If the action could not be found.
+        """
+
         includes_answer = FINAL_ANSWER_ACTION in text
         try:
             found = self.pattern.search(text)
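
ChatOutputParser.parse now documents its return types and exceptions. A minimal sketch (not part of the diff); the sample texts follow the chat agent's FORMAT_INSTRUCTIONS and are assumptions for illustration:

from langchain.agents.chat.output_parser import ChatOutputParser

parser = ChatOutputParser()

# A fenced JSON action block yields an AgentAction.
action = parser.parse('I need a tool.\n```\n{"action": "search", "action_input": "weather"}\n```')
print(action.tool, action.tool_input)   # search weather

# A "Final Answer:" line yields an AgentFinish.
finish = parser.parse("Final Answer: 4")
print(finish.return_values)             # {'output': '4'}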
langchain/agents/conversational/base.py
CHANGED

@@ -1,4 +1,5 @@
 """An agent designed to hold a conversation in addition to using tools."""
+
 from __future__ import annotations
 
 from typing import Any, List, Optional, Sequence
@@ -40,12 +41,20 @@ class ConversationalAgent(Agent):
 
     @property
     def observation_prefix(self) -> str:
-        """Prefix to append the observation with.
+        """Prefix to append the observation with.
+
+        Returns:
+            "Observation: "
+        """
         return "Observation: "
 
     @property
     def llm_prefix(self) -> str:
-        """Prefix to append the llm call with.
+        """Prefix to append the llm call with.
+
+        Returns:
+            "Thought: "
+        """
         return "Thought:"
 
     @classmethod
@@ -64,11 +73,15 @@ class ConversationalAgent(Agent):
         Args:
             tools: List of tools the agent will have access to, used to format the
                 prompt.
-            prefix: String to put before the list of tools.
-            suffix: String to put after the list of tools.
-
+            prefix: String to put before the list of tools. Defaults to PREFIX.
+            suffix: String to put after the list of tools. Defaults to SUFFIX.
+            format_instructions: Instructions on how to use the tools. Defaults to
+                FORMAT_INSTRUCTIONS
+            ai_prefix: String to use before AI output. Defaults to "AI".
             human_prefix: String to use before human output.
+                Defaults to "Human".
             input_variables: List of input variables the final prompt will expect.
+                Defaults to ["input", "chat_history", "agent_scratchpad"].
 
         Returns:
             A PromptTemplate with the template assembled from the pieces here.
@@ -105,7 +118,26 @@ class ConversationalAgent(Agent):
         input_variables: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Agent:
-        """Construct an agent from an LLM and tools.
+        """Construct an agent from an LLM and tools.
+
+        Args:
+            llm: The language model to use.
+            tools: A list of tools to use.
+            callback_manager: The callback manager to use. Default is None.
+            output_parser: The output parser to use. Default is None.
+            prefix: The prefix to use in the prompt. Default is PREFIX.
+            suffix: The suffix to use in the prompt. Default is SUFFIX.
+            format_instructions: The format instructions to use.
+                Default is FORMAT_INSTRUCTIONS.
+            ai_prefix: The prefix to use before AI output. Default is "AI".
+            human_prefix: The prefix to use before human output.
+                Default is "Human".
+            input_variables: The input variables to use. Default is None.
+            **kwargs: Any additional keyword arguments to pass to the agent.
+
+        Returns:
+            An agent.
+        """
         cls._validate_tools(tools)
         prompt = cls.create_prompt(
             tools,
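
The ConversationalAgent docstrings now spell out the prefix defaults. A minimal sketch (not part of the diff) overriding them; llm and tools are assumed:

from langchain.agents.conversational.base import ConversationalAgent

agent = ConversationalAgent.from_llm_and_tools(
    llm=llm,
    tools=tools,
    ai_prefix="Assistant",    # documented default is "AI"
    human_prefix="Customer",  # documented default is "Human"
)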
langchain/agents/conversational/output_parser.py
CHANGED

@@ -22,6 +22,16 @@ class ConvoOutputParser(AgentOutputParser):
         return self.format_instructions
 
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+        """Parse the output from the agent into
+        an AgentAction or AgentFinish object.
+
+        Args:
+            text: The text to parse.
+
+        Returns:
+            An AgentAction or AgentFinish object.
+        """
+
         if f"{self.ai_prefix}:" in text:
             return AgentFinish(
                 {"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
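
The AgentFinish branch is visible in the hunk above: any text containing the ai_prefix followed by a colon is treated as the final answer. A minimal sketch (not part of the diff):

from langchain.agents.conversational.output_parser import ConvoOutputParser

parser = ConvoOutputParser()  # ai_prefix defaults to "AI"
result = parser.parse("Thought: Do I need to use a tool? No\nAI: Hello there!")
print(result.return_values["output"])  # Hello there!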
langchain/agents/conversational_chat/base.py
CHANGED

@@ -1,4 +1,5 @@
 """An agent designed to hold a conversation in addition to using tools."""
+
 from __future__ import annotations
 
 from typing import Any, List, Optional, Sequence, Tuple
@@ -35,7 +36,9 @@ class ConversationalChatAgent(Agent):
     """An agent designed to hold a conversation in addition to using tools."""
 
     output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)
+    """Output parser for the agent."""
     template_tool_response: str = TEMPLATE_TOOL_RESPONSE
+    """Template for the tool response."""
 
     @classmethod
     def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
@@ -47,12 +50,20 @@ class ConversationalChatAgent(Agent):
 
     @property
     def observation_prefix(self) -> str:
-        """Prefix to append the observation with.
+        """Prefix to append the observation with.
+
+        Returns:
+            "Observation: "
+        """
         return "Observation: "
 
     @property
     def llm_prefix(self) -> str:
-        """Prefix to append the llm call with.
+        """Prefix to append the llm call with.
+
+        Returns:
+            "Thought: "
+        """
         return "Thought:"
 
     @classmethod
@@ -69,6 +80,20 @@ class ConversationalChatAgent(Agent):
         input_variables: Optional[List[str]] = None,
         output_parser: Optional[BaseOutputParser] = None,
     ) -> BasePromptTemplate:
+        """Create a prompt for the agent.
+
+        Args:
+            tools: The tools to use.
+            system_message: The system message to use.
+                Defaults to the PREFIX.
+            human_message: The human message to use.
+                Defaults to the SUFFIX.
+            input_variables: The input variables to use. Defaults to None.
+            output_parser: The output parser to use. Defaults to None.
+
+        Returns:
+            A PromptTemplate.
+        """
         tool_strings = "\n".join(
             [f"> {tool.name}: {tool.description}" for tool in tools]
         )
@@ -115,7 +140,21 @@ class ConversationalChatAgent(Agent):
         input_variables: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Agent:
-        """Construct an agent from an LLM and tools.
+        """Construct an agent from an LLM and tools.
+
+        Args:
+            llm: The language model to use.
+            tools: A list of tools to use.
+            callback_manager: The callback manager to use. Default is None.
+            output_parser: The output parser to use. Default is None.
+            system_message: The system message to use. Default is PREFIX.
+            human_message: The human message to use. Default is SUFFIX.
+            input_variables: The input variables to use. Default is None.
+            **kwargs: Any additional arguments.
+
+        Returns:
+            An agent.
+        """
         cls._validate_tools(tools)
         _output_parser = output_parser or cls._get_default_output_parser()
         prompt = cls.create_prompt(
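
ConversationalChatAgent.create_prompt is now documented as well. A minimal sketch (not part of the diff) with a custom system message; tools is assumed to be a list of BaseTool instances:

from langchain.agents.conversational_chat.base import ConversationalChatAgent

prompt = ConversationalChatAgent.create_prompt(
    tools=tools,
    system_message="You are a helpful assistant that prefers tools over guessing.",
)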
langchain/agents/format_scratchpad/__init__.py
CHANGED

@@ -5,6 +5,7 @@ that result from previous iterations of the agent.
 Depending on the prompting strategy you are using, you may want to format these
 differently before passing them into the LLM.
 """
+
 from langchain.agents.format_scratchpad.log import format_log_to_str
 from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
 from langchain.agents.format_scratchpad.openai_functions import (
langchain/agents/format_scratchpad/log.py
CHANGED

@@ -8,7 +8,18 @@ def format_log_to_str(
     observation_prefix: str = "Observation: ",
     llm_prefix: str = "Thought: ",
 ) -> str:
-    """Construct the scratchpad that lets the agent continue its thought process.
+    """Construct the scratchpad that lets the agent continue its thought process.
+
+    Args:
+        intermediate_steps: List of tuples of AgentAction and observation strings.
+        observation_prefix: Prefix to append the observation with.
+            Defaults to "Observation: ".
+        llm_prefix: Prefix to append the llm call with.
+            Defaults to "Thought: ".
+
+    Returns:
+        str: The scratchpad.
+    """
     thoughts = ""
     for action, observation in intermediate_steps:
         thoughts += action.log
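
format_log_to_str stitches (AgentAction, observation) pairs into a text scratchpad, as its new docstring describes. A minimal sketch (not part of the diff):

from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad import format_log_to_str

action = AgentAction(
    tool="search",
    tool_input="weather in SF",
    log="I should look this up.\nAction: search\nAction Input: weather in SF",
)
print(format_log_to_str([(action, "It is sunny.")]))
# I should look this up.
# Action: search
# Action Input: weather in SF
# Observation: It is sunny.
# Thought: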
langchain/agents/format_scratchpad/log_to_messages.py
CHANGED

@@ -8,7 +8,16 @@ def format_log_to_messages(
     intermediate_steps: List[Tuple[AgentAction, str]],
     template_tool_response: str = "{observation}",
 ) -> List[BaseMessage]:
-    """Construct the scratchpad that lets the agent continue its thought process.
+    """Construct the scratchpad that lets the agent continue its thought process.
+
+    Args:
+        intermediate_steps: List of tuples of AgentAction and observation strings.
+        template_tool_response: Template to format the observation with.
+            Defaults to "{observation}".
+
+    Returns:
+        List[BaseMessage]: The scratchpad.
+    """
     thoughts: List[BaseMessage] = []
     for action, observation in intermediate_steps:
         thoughts.append(AIMessage(content=action.log))
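
format_log_to_messages is the message-based variant: each step becomes an AIMessage (the agent's log) followed by a HumanMessage built from template_tool_response. A minimal sketch (not part of the diff):

from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad import format_log_to_messages

action = AgentAction(tool="search", tool_input="weather in SF", log="I should look this up.")
messages = format_log_to_messages(
    [(action, "It is sunny.")],
    template_tool_response="TOOL RESPONSE: {observation}",
)
# [AIMessage('I should look this up.'), HumanMessage('TOOL RESPONSE: It is sunny.')]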
langchain/agents/format_scratchpad/openai_functions.py
CHANGED

@@ -16,7 +16,8 @@ def _convert_agent_action_to_messages(
         agent_action: Agent action to convert.
 
     Returns:
-        AIMessage
+        AIMessage or the previous messages plus a FunctionMessage that corresponds to
+        the original tool invocation
     """
     if isinstance(agent_action, AgentActionMessageLog):
         return list(agent_action.message_log) + [
@@ -31,10 +32,13 @@ def _create_function_message(
 ) -> FunctionMessage:
     """Convert agent action and observation into a function message.
     Args:
-        agent_action: the tool invocation request from the agent
-        observation: the result of the tool invocation
+        agent_action: the tool invocation request from the agent.
+        observation: the result of the tool invocation.
     Returns:
-        FunctionMessage that corresponds to the original tool invocation
+        FunctionMessage that corresponds to the original tool invocation.
+
+    Raises:
+        ValueError: if the observation cannot be converted to a string.
     """
     if not isinstance(observation, str):
         try:
@@ -59,7 +63,8 @@ def format_to_openai_function_messages(
 
     Returns:
         list of messages to send to the LLM for the next prediction
-
+    Raises:
+        ValueError: if the observation cannot be converted to a string.
     """
     messages = []
 
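
format_to_openai_function_messages replays each tool call as its original message_log plus a FunctionMessage carrying the observation; the new Raises section covers observations that cannot be stringified. A minimal sketch (not part of the diff); the AgentActionMessageLog is hand-built here, whereas the OpenAI-functions agent normally produces it:

from langchain_core.agents import AgentActionMessageLog
from langchain_core.messages import AIMessage
from langchain.agents.format_scratchpad import format_to_openai_function_messages

call = {"name": "search", "arguments": '{"query": "weather in SF"}'}
action = AgentActionMessageLog(
    tool="search",
    tool_input={"query": "weather in SF"},
    log="Calling search",
    message_log=[AIMessage(content="", additional_kwargs={"function_call": call})],
)
messages = format_to_openai_function_messages([(action, "It is sunny.")])
# -> the original AIMessage, then FunctionMessage(name="search", content="It is sunny.")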
langchain/agents/format_scratchpad/tools.py
CHANGED

@@ -14,12 +14,16 @@ from langchain.agents.output_parsers.tools import ToolAgentAction
 def _create_tool_message(
     agent_action: ToolAgentAction, observation: str
 ) -> ToolMessage:
-    """Convert agent action and observation into a
+    """Convert agent action and observation into a tool message.
+
     Args:
-        agent_action: the tool invocation request from the agent
-        observation: the result of the tool invocation
+        agent_action: the tool invocation request from the agent.
+        observation: the result of the tool invocation.
     Returns:
-
+        ToolMessage that corresponds to the original tool invocation.
+
+    Raises:
+        ValueError: if the observation cannot be converted to a string.
     """
     if not isinstance(observation, str):
         try:
@@ -38,13 +42,13 @@ def _create_tool_message(
 def format_to_tool_messages(
     intermediate_steps: Sequence[Tuple[AgentAction, str]],
 ) -> List[BaseMessage]:
-    """Convert (AgentAction, tool output) tuples into
+    """Convert (AgentAction, tool output) tuples into ToolMessages.
 
     Args:
-        intermediate_steps: Steps the LLM has taken to date, along with observations
+        intermediate_steps: Steps the LLM has taken to date, along with observations.
 
     Returns:
-        list of messages to send to the LLM for the next prediction
+        list of messages to send to the LLM for the next prediction.
 
     """
     messages = []
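
format_to_tool_messages is the tool-calling counterpart: each ToolAgentAction is replayed as its message_log plus a ToolMessage keyed by tool_call_id. A minimal sketch (not part of the diff); the ToolAgentAction is hand-built here, whereas a tool-calling agent normally produces it:

from langchain_core.messages import AIMessage
from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.output_parsers.tools import ToolAgentAction

action = ToolAgentAction(
    tool="search",
    tool_input={"query": "weather in SF"},
    log="Calling search",
    message_log=[AIMessage(content="")],
    tool_call_id="call_1",
)
messages = format_to_tool_messages([(action, "It is sunny.")])
# -> the original AIMessage, then ToolMessage(content="It is sunny.", tool_call_id="call_1")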
langchain/agents/initialize.py
CHANGED

@@ -1,4 +1,5 @@
 """Load agent."""
+
 from typing import Any, Optional, Sequence
 
 from langchain_core._api import deprecated
@@ -35,17 +36,24 @@ def initialize_agent(
     Args:
         tools: List of tools this agent has access to.
         llm: Language model to use as the agent.
-        agent: Agent type to use. If None and agent_path is also None, will default
-            AgentType.ZERO_SHOT_REACT_DESCRIPTION.
+        agent: Agent type to use. If None and agent_path is also None, will default
+            to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
         callback_manager: CallbackManager to use. Global callback manager is used if
             not provided. Defaults to None.
-        agent_path: Path to serialized agent to use.
-
-
-
+        agent_path: Path to serialized agent to use. If None and agent is also None,
+            will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
+        agent_kwargs: Additional keyword arguments to pass to the underlying agent.
+            Defaults to None.
+        tags: Tags to apply to the traced runs. Defaults to None.
+        **kwargs: Additional keyword arguments passed to the agent executor.
 
     Returns:
-        An agent executor
+        An agent executor.
+
+    Raises:
+        ValueError: If both `agent` and `agent_path` are specified.
+        ValueError: If `agent` is not a valid agent type.
+        ValueError: If both `agent` and `agent_path` are None.
     """
     tags_ = list(tags) if tags else []
     if agent is None and agent_path is None:
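
initialize_agent (the legacy entry point) now documents its defaults and the ValueErrors it raises. A minimal sketch (not part of the diff); llm and tools are assumed:

from langchain.agents import AgentType, initialize_agent

executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # the default when agent and agent_path are both None
    tags=["demo"],
)
executor.invoke({"input": "What is 2 + 2?"})
# Passing both agent= and agent_path= raises ValueError, per the new Raises section.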
langchain/agents/json_chat/base.py
CHANGED

@@ -36,11 +36,17 @@ def create_json_chat_agent(
             then passed into the LLM. Default is `render_text_description`.
         template_tool_response: Template prompt that uses the tool response (observation)
             to make the LLM generate the next action to take.
+            Default is TEMPLATE_TOOL_RESPONSE.
 
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
         variables as the prompt passed in does. It returns as output either an
         AgentAction or AgentFinish.
+
+    Raises:
+        ValueError: If the prompt is missing required variables.
+        ValueError: If the template_tool_response is missing
+            the required variable 'observation'.
 
     Example:
 
@@ -122,8 +128,8 @@ def create_json_chat_agent(
 
             ```json
             {{
-                "action": string,
-                "action_input": string
+                "action": string, \\ The action to take. Must be one of {tool_names}
+                "action_input": string \\ The input to the action
             }}
             ```
 
@@ -134,7 +140,7 @@ def create_json_chat_agent(
             ```json
             {{
                 "action": "Final Answer",
-                "action_input": string
+                "action_input": string \\ You should put what you want to return to use here
             }}
             ```
 
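
create_json_chat_agent now documents the ValueErrors raised for missing prompt variables. A minimal sketch of wiring it up (not part of the diff); llm and tools are assumed, and the hub prompt name mirrors the one used in the function's own docstring example:

from langchain import hub
from langchain.agents import AgentExecutor, create_json_chat_agent

prompt = hub.pull("hwchase17/react-chat-json")  # must expose the required variables
agent = create_json_chat_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)
executor.invoke({"input": "hi"})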
langchain/agents/loading.py
CHANGED

@@ -48,6 +48,9 @@ def load_agent_from_config(
 
     Returns:
         An agent executor.
+
+    Raises:
+        ValueError: If agent type is not specified in the config.
     """
     if "_type" not in config:
         raise ValueError("Must specify an agent Type in config")
@@ -99,6 +102,10 @@ def load_agent(
 
     Returns:
         An agent executor.
+
+    Raises:
+        RuntimeError: If loading from the deprecated github-based
+            Hub is attempted.
     """
     if isinstance(path, str) and path.startswith("lc://"):
         raise RuntimeError(