langchain 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (182)
  1. langchain/__init__.py +1 -0
  2. langchain/agents/__init__.py +5 -4
  3. langchain/agents/agent.py +272 -50
  4. langchain/agents/agent_iterator.py +20 -0
  5. langchain/agents/agent_toolkits/__init__.py +1 -0
  6. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  10. langchain/agents/agent_types.py +1 -0
  11. langchain/agents/chat/base.py +37 -1
  12. langchain/agents/chat/output_parser.py +14 -0
  13. langchain/agents/conversational/base.py +38 -6
  14. langchain/agents/conversational/output_parser.py +10 -0
  15. langchain/agents/conversational_chat/base.py +42 -3
  16. langchain/agents/format_scratchpad/__init__.py +1 -0
  17. langchain/agents/format_scratchpad/log.py +12 -1
  18. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  19. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  20. langchain/agents/format_scratchpad/tools.py +11 -7
  21. langchain/agents/initialize.py +15 -7
  22. langchain/agents/json_chat/base.py +6 -0
  23. langchain/agents/loading.py +7 -0
  24. langchain/agents/mrkl/base.py +39 -10
  25. langchain/agents/mrkl/output_parser.py +12 -0
  26. langchain/agents/openai_assistant/base.py +37 -14
  27. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  28. langchain/agents/openai_functions_agent/base.py +61 -10
  29. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  30. langchain/agents/openai_tools/base.py +3 -0
  31. langchain/agents/output_parsers/__init__.py +1 -0
  32. langchain/agents/react/base.py +1 -0
  33. langchain/agents/self_ask_with_search/base.py +1 -0
  34. langchain/agents/structured_chat/output_parser.py +3 -3
  35. langchain/agents/tools.py +3 -0
  36. langchain/agents/utils.py +9 -1
  37. langchain/base_language.py +1 -0
  38. langchain/callbacks/__init__.py +1 -0
  39. langchain/callbacks/base.py +1 -0
  40. langchain/callbacks/streaming_stdout.py +1 -0
  41. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  42. langchain/callbacks/tracers/evaluation.py +1 -0
  43. langchain/chains/api/base.py +5 -2
  44. langchain/chains/base.py +1 -1
  45. langchain/chains/combine_documents/base.py +59 -0
  46. langchain/chains/combine_documents/map_reduce.py +4 -2
  47. langchain/chains/combine_documents/map_rerank.py +5 -3
  48. langchain/chains/combine_documents/refine.py +4 -2
  49. langchain/chains/combine_documents/stuff.py +1 -0
  50. langchain/chains/constitutional_ai/base.py +1 -0
  51. langchain/chains/constitutional_ai/models.py +1 -0
  52. langchain/chains/constitutional_ai/principles.py +1 -0
  53. langchain/chains/conversation/base.py +81 -1
  54. langchain/chains/conversational_retrieval/base.py +2 -1
  55. langchain/chains/elasticsearch_database/base.py +2 -1
  56. langchain/chains/hyde/base.py +1 -0
  57. langchain/chains/llm.py +1 -0
  58. langchain/chains/llm_checker/base.py +4 -3
  59. langchain/chains/llm_math/base.py +1 -0
  60. langchain/chains/loading.py +2 -1
  61. langchain/chains/mapreduce.py +1 -0
  62. langchain/chains/moderation.py +1 -1
  63. langchain/chains/natbot/base.py +1 -0
  64. langchain/chains/openai_functions/base.py +1 -0
  65. langchain/chains/qa_generation/base.py +47 -1
  66. langchain/chains/qa_with_sources/__init__.py +1 -0
  67. langchain/chains/qa_with_sources/loading.py +1 -0
  68. langchain/chains/qa_with_sources/vector_db.py +1 -1
  69. langchain/chains/query_constructor/base.py +1 -0
  70. langchain/chains/query_constructor/ir.py +1 -0
  71. langchain/chains/question_answering/chain.py +1 -0
  72. langchain/chains/retrieval_qa/base.py +3 -2
  73. langchain/chains/router/base.py +1 -0
  74. langchain/chains/router/llm_router.py +2 -1
  75. langchain/chains/router/multi_prompt.py +1 -0
  76. langchain/chains/router/multi_retrieval_qa.py +1 -0
  77. langchain/chains/sequential.py +2 -1
  78. langchain/chains/structured_output/base.py +6 -6
  79. langchain/chains/summarize/chain.py +1 -0
  80. langchain/chains/transform.py +4 -3
  81. langchain/chat_models/__init__.py +1 -0
  82. langchain/chat_models/base.py +2 -2
  83. langchain/docstore/__init__.py +1 -0
  84. langchain/document_loaders/__init__.py +1 -0
  85. langchain/document_transformers/__init__.py +1 -0
  86. langchain/embeddings/__init__.py +0 -1
  87. langchain/evaluation/__init__.py +2 -1
  88. langchain/evaluation/agents/__init__.py +1 -0
  89. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  90. langchain/evaluation/comparison/__init__.py +1 -0
  91. langchain/evaluation/comparison/eval_chain.py +1 -0
  92. langchain/evaluation/comparison/prompt.py +1 -0
  93. langchain/evaluation/embedding_distance/__init__.py +1 -0
  94. langchain/evaluation/embedding_distance/base.py +1 -0
  95. langchain/evaluation/loading.py +1 -0
  96. langchain/evaluation/parsing/base.py +1 -0
  97. langchain/evaluation/qa/__init__.py +1 -0
  98. langchain/evaluation/qa/eval_chain.py +1 -0
  99. langchain/evaluation/qa/generate_chain.py +1 -0
  100. langchain/evaluation/schema.py +1 -0
  101. langchain/evaluation/scoring/__init__.py +1 -0
  102. langchain/evaluation/scoring/eval_chain.py +1 -0
  103. langchain/evaluation/scoring/prompt.py +1 -0
  104. langchain/evaluation/string_distance/__init__.py +1 -0
  105. langchain/example_generator.py +1 -0
  106. langchain/formatting.py +1 -0
  107. langchain/globals/__init__.py +1 -0
  108. langchain/graphs/__init__.py +1 -0
  109. langchain/indexes/__init__.py +1 -0
  110. langchain/indexes/_sql_record_manager.py +1 -2
  111. langchain/indexes/graph.py +1 -0
  112. langchain/indexes/prompts/__init__.py +1 -0
  113. langchain/input.py +1 -0
  114. langchain/llms/__init__.py +1 -0
  115. langchain/load/__init__.py +1 -0
  116. langchain/memory/__init__.py +5 -0
  117. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  118. langchain/output_parsers/__init__.py +1 -0
  119. langchain/prompts/__init__.py +1 -0
  120. langchain/prompts/example_selector/__init__.py +1 -0
  121. langchain/python.py +1 -0
  122. langchain/requests.py +1 -0
  123. langchain/retrievers/__init__.py +1 -0
  124. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  125. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  126. langchain/retrievers/ensemble.py +1 -0
  127. langchain/retrievers/self_query/base.py +7 -7
  128. langchain/schema/__init__.py +1 -0
  129. langchain/schema/runnable/__init__.py +1 -0
  130. langchain/serpapi.py +1 -0
  131. langchain/smith/__init__.py +6 -5
  132. langchain/smith/evaluation/__init__.py +0 -1
  133. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  134. langchain/sql_database.py +1 -0
  135. langchain/storage/__init__.py +1 -0
  136. langchain/storage/_lc_store.py +1 -0
  137. langchain/storage/in_memory.py +1 -0
  138. langchain/text_splitter.py +1 -0
  139. langchain/tools/__init__.py +1 -0
  140. langchain/tools/amadeus/__init__.py +1 -0
  141. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  142. langchain/tools/bing_search/__init__.py +1 -0
  143. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  144. langchain/tools/ddg_search/__init__.py +1 -0
  145. langchain/tools/edenai/__init__.py +1 -0
  146. langchain/tools/eleven_labs/__init__.py +1 -0
  147. langchain/tools/file_management/__init__.py +1 -0
  148. langchain/tools/github/__init__.py +1 -1
  149. langchain/tools/gitlab/__init__.py +1 -1
  150. langchain/tools/gmail/__init__.py +1 -0
  151. langchain/tools/golden_query/__init__.py +1 -0
  152. langchain/tools/google_cloud/__init__.py +1 -0
  153. langchain/tools/google_finance/__init__.py +1 -0
  154. langchain/tools/google_jobs/__init__.py +1 -0
  155. langchain/tools/google_lens/__init__.py +1 -0
  156. langchain/tools/google_places/__init__.py +1 -0
  157. langchain/tools/google_scholar/__init__.py +1 -0
  158. langchain/tools/google_search/__init__.py +1 -0
  159. langchain/tools/google_trends/__init__.py +1 -0
  160. langchain/tools/human/__init__.py +1 -0
  161. langchain/tools/memorize/__init__.py +1 -0
  162. langchain/tools/metaphor_search/__init__.py +1 -0
  163. langchain/tools/multion/__init__.py +1 -0
  164. langchain/tools/office365/__init__.py +1 -0
  165. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  166. langchain/tools/openweathermap/__init__.py +1 -0
  167. langchain/tools/playwright/__init__.py +1 -0
  168. langchain/tools/shell/__init__.py +1 -0
  169. langchain/tools/slack/__init__.py +1 -0
  170. langchain/tools/sql_database/prompt.py +1 -0
  171. langchain/tools/steamship_image_generation/__init__.py +1 -0
  172. langchain/tools/tavily_search/__init__.py +1 -0
  173. langchain/tools/wolfram_alpha/__init__.py +1 -0
  174. langchain/tools/zapier/__init__.py +1 -0
  175. langchain/utilities/__init__.py +1 -0
  176. langchain/utilities/python.py +1 -0
  177. langchain/vectorstores/__init__.py +1 -0
  178. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/METADATA +2 -3
  179. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/RECORD +182 -181
  180. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/LICENSE +0 -0
  181. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/WHEEL +0 -0
  182. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/entry_points.txt +0 -0
langchain/agents/mrkl/output_parser.py CHANGED
@@ -30,6 +30,18 @@ class MRKLOutputParser(AgentOutputParser):
         return self.format_instructions
 
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+        """Parse the output from the agent into
+        an AgentAction or AgentFinish object.
+
+        Args:
+            text: The text to parse.
+
+        Returns:
+            An AgentAction or AgentFinish object.
+
+        Raises:
+            OutputParserException: If the output could not be parsed.
+        """
         includes_answer = FINAL_ANSWER_ACTION in text
         regex = (
             r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
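
Note: the parse method documented above keys off the FINAL_ANSWER_ACTION marker and the Action/Action Input regex shown in the hunk. A minimal standalone sketch of that parsing logic, assuming FINAL_ANSWER_ACTION is the literal "Final Answer:" marker (the helper name below is invented for illustration, it is not the library API):

import re

FINAL_ANSWER_ACTION = "Final Answer:"  # assumed value of the module constant
ACTION_RE = re.compile(
    r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)",
    re.DOTALL,
)

def parse_sketch(text: str):
    """Return ("finish", answer) or ("action", tool, tool_input)."""
    if FINAL_ANSWER_ACTION in text:
        return ("finish", text.split(FINAL_ANSWER_ACTION)[-1].strip())
    match = ACTION_RE.search(text)
    if not match:
        raise ValueError(f"Could not parse LLM output: {text!r}")
    return ("action", match.group(1).strip(), match.group(2).strip().strip('"'))

print(parse_sketch("Thought: I need to look this up.\nAction: Search\nAction Input: langchain 0.2.7"))
# -> ('action', 'Search', 'langchain 0.2.7')
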
langchain/agents/openai_assistant/base.py CHANGED
@@ -33,18 +33,34 @@ if TYPE_CHECKING:
 
 
 class OpenAIAssistantFinish(AgentFinish):
-    """AgentFinish with run and thread metadata."""
+    """AgentFinish with run and thread metadata.
+
+    Parameters:
+        run_id: Run id.
+        thread_id: Thread id.
+    """
 
     run_id: str
     thread_id: str
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
+        """Check if the class is serializable by LangChain.
+
+        Returns:
+            False
+        """
         return False
 
 
 class OpenAIAssistantAction(AgentAction):
-    """AgentAction with info needed to submit custom tool output to existing run."""
+    """AgentAction with info needed to submit custom tool output to existing run.
+
+    Parameters:
+        tool_call_id: Tool call id.
+        run_id: Run id.
+        thread_id: Thread id
+    """
 
     tool_call_id: str
     run_id: str
@@ -52,6 +68,11 @@ class OpenAIAssistantAction(AgentAction):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
+        """Check if the class is serializable by LangChain.
+
+        Returns:
+            False
+        """
         return False
 
 
@@ -210,7 +231,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
     as_agent: bool = False
     """Use as a LangChain agent, compatible with the AgentExecutor."""
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_async_client(cls, values: dict) -> dict:
         if values["async_client"] is None:
             import openai
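
Note: this hunk (and the matching ones in the agent and chain files below) replaces a bare @root_validator() with @root_validator(pre=False, skip_on_failure=True). In pydantic v1 semantics a pre=False root validator runs after field validation, and skip_on_failure=True skips it entirely when a field has already failed, so lookups like values["async_client"] cannot hit a missing key. A minimal sketch with plain pydantic, not the LangChain class itself:

from typing import Optional
from pydantic.v1 import BaseModel, root_validator  # on pydantic 1.x: from pydantic import ...

class Runner(BaseModel):
    client: Optional[str] = None  # stand-in for the real client field

    @root_validator(pre=False, skip_on_failure=True)
    def validate_client(cls, values: dict) -> dict:
        # Runs after field validation and is skipped if a field failed,
        # so indexing into `values` here is safe.
        if values["client"] is None:
            values["client"] = "default-client"  # stand-in for building a default OpenAI client
        return values

print(Runner().client)  # -> default-client
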
@@ -238,7 +259,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
             model: Assistant model to use.
             client: OpenAI or AzureOpenAI client.
-                Will create default OpenAI client if not specified.
+                Will create a default OpenAI client if not specified.
+            **kwargs: Additional arguments.
 
         Returns:
             OpenAIAssistantRunnable configured to run using the created assistant.
@@ -272,12 +294,12 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             model: Override Assistant model for this run.
             tools: Override Assistant tools for this run.
             run_metadata: Metadata to associate with new run.
-            config: Runnable config:
+            config: Runnable config. Defaults to None.
 
         Return:
             If self.as_agent, will return
-            Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise,
-            will return OpenAI types
+            Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
+            Otherwise, will return OpenAI types
             Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
         """
 
@@ -351,7 +373,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
         ] = None,
         **kwargs: Any,
     ) -> OpenAIAssistantRunnable:
-        """Create an AsyncOpenAI Assistant and instantiate the Runnable.
+        """Async create an AsyncOpenAI Assistant and instantiate the Runnable.
 
         Args:
             name: Assistant name.
@@ -359,7 +381,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             tools: Assistant tools. Can be passed in OpenAI format or as BaseTools.
             model: Assistant model to use.
             async_client: AsyncOpenAI client.
-                Will create default async_client if not specified.
+                Will create default async_client if not specified.
 
         Returns:
             AsyncOpenAIAssistantRunnable configured to run using the created assistant.
@@ -387,19 +409,20 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
             run_id: Existing run to use. Should only be supplied when providing
                 the tool output for a required action after an initial invocation.
             file_ids: File ids to include in new run. Used for retrieval.
-            message_metadata: Metadata to associate with new message.
+            message_metadata: Metadata to associate with a new message.
             thread_metadata: Metadata to associate with new thread. Only relevant
-                when new thread being created.
+                when a new thread is created.
             instructions: Additional run instructions.
             model: Override Assistant model for this run.
             tools: Override Assistant tools for this run.
             run_metadata: Metadata to associate with new run.
-            config: Runnable config:
+            config: Runnable config. Defaults to None.
+            **kwargs: Additional arguments.
 
         Return:
             If self.as_agent, will return
-            Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise,
-            will return OpenAI types
+            Union[List[OpenAIAssistantAction], OpenAIAssistantFinish].
+            Otherwise, will return OpenAI types
             Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]].
         """
 
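Note: the create_assistant/invoke docstrings edited above belong to OpenAIAssistantRunnable. A hedged usage sketch of the as_agent path those docstrings describe; the assistant name, instructions and model are illustrative, and a configured OpenAI API key is assumed:

from langchain.agents.openai_assistant import OpenAIAssistantRunnable

# as_agent=True makes invoke() return OpenAIAssistantAction / OpenAIAssistantFinish,
# as documented in the hunks above.
assistant = OpenAIAssistantRunnable.create_assistant(
    name="diff reader",                        # illustrative name
    instructions="Answer questions briefly.",  # illustrative instructions
    tools=[],                                  # OpenAI-format dicts or BaseTools
    model="gpt-4o-mini",                       # illustrative model
    as_agent=True,
)

result = assistant.invoke({"content": "What is 2 + 2?"})
print(result.return_values["output"])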
 
langchain/agents/openai_functions_agent/agent_token_buffer_memory.py CHANGED
@@ -1,4 +1,5 @@
 """Memory used to save agent output AND intermediate steps."""
+
 from typing import Any, Dict, List
 
 from langchain_core.language_models import BaseLanguageModel
@@ -12,7 +13,22 @@ from langchain.memory.chat_memory import BaseChatMemory
 
 
 class AgentTokenBufferMemory(BaseChatMemory):
-    """Memory used to save agent output AND intermediate steps."""
+    """Memory used to save agent output AND intermediate steps.
+
+    Parameters:
+        human_prefix: Prefix for human messages. Default is "Human".
+        ai_prefix: Prefix for AI messages. Default is "AI".
+        llm: Language model.
+        memory_key: Key to save memory under. Default is "history".
+        max_token_limit: Maximum number of tokens to keep in the buffer.
+            Once the buffer exceeds this many tokens, the oldest
+            messages will be pruned. Default is 12000.
+        return_messages: Whether to return messages. Default is True.
+        output_key: Key to save output under. Default is "output".
+        intermediate_steps_key: Key to save intermediate steps under.
+            Default is "intermediate_steps".
+        format_as_tools: Whether to format as tools. Default is False.
+    """
 
     human_prefix: str = "Human"
     ai_prefix: str = "AI"
@@ -33,14 +49,21 @@ class AgentTokenBufferMemory(BaseChatMemory):
 
     @property
     def memory_variables(self) -> List[str]:
-        """Will always return list of memory variables.
+        """Always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
     def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
-        """Return history buffer."""
+        """Return history buffer.
+
+        Args:
+            inputs: Inputs to the agent.
+
+        Returns:
+            A dictionary with the history buffer.
+        """
         if self.return_messages:
             final_buffer: Any = self.buffer
         else:
@@ -52,7 +75,12 @@ class AgentTokenBufferMemory(BaseChatMemory):
         return {self.memory_key: final_buffer}
 
     def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
-        """Save context from this conversation to buffer. Pruned."""
+        """Save context from this conversation to buffer. Pruned.
+
+        Args:
+            inputs: Inputs to the agent.
+            outputs: Outputs from the agent.
+        """
         input_str, output_str = self._get_input_output(inputs, outputs)
         self.chat_memory.add_user_message(input_str)
         format_to_messages = (
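
Note: a brief usage sketch of AgentTokenBufferMemory based on the parameter list documented above. Only the constructor arguments come from the diff; the chat model choice is illustrative and an OpenAI API key is assumed to be configured (the llm is only used for token counting when pruning):

from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain_openai import ChatOpenAI  # installed separately; illustrative choice

memory = AgentTokenBufferMemory(
    llm=ChatOpenAI(model="gpt-4o-mini"),  # used to count tokens when pruning
    memory_key="history",                 # key the prompt reads the buffer from
    max_token_limit=12000,                # oldest messages pruned past this many tokens
)

memory.save_context(
    {"input": "hi"},
    {"output": "hello!", "intermediate_steps": []},
)
print(memory.load_memory_variables({})["history"])
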
langchain/agents/openai_functions_agent/base.py CHANGED
@@ -1,4 +1,5 @@
 """Module implements an agent that uses OpenAI's APIs function enabled API."""
+
 from typing import Any, List, Optional, Sequence, Tuple, Type, Union
 
 from langchain_core._api import deprecated
@@ -41,21 +42,35 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         prompt: The prompt for this agent, should support agent_scratchpad as one
             of the variables. For an easy way to construct this prompt, use
             `OpenAIFunctionsAgent.create_prompt(...)`
+        output_parser: The output parser for this agent. Should be an instance of
+            OpenAIFunctionsAgentOutputParser.
+            Defaults to OpenAIFunctionsAgentOutputParser.
     """
 
     llm: BaseLanguageModel
     tools: Sequence[BaseTool]
     prompt: BasePromptTemplate
-    output_parser: Type[
+    output_parser: Type[OpenAIFunctionsAgentOutputParser] = (
         OpenAIFunctionsAgentOutputParser
-    ] = OpenAIFunctionsAgentOutputParser
+    )
 
     def get_allowed_tools(self) -> List[str]:
         """Get allowed tools."""
         return [t.name for t in self.tools]
 
-    @root_validator
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_prompt(cls, values: dict) -> dict:
+        """Validate prompt.
+
+        Args:
+            values: Values to validate.
+
+        Returns:
+            Validated values.
+
+        Raises:
+            ValueError: If `agent_scratchpad` is not in the prompt.
+        """
         prompt: BasePromptTemplate = values["prompt"]
         if "agent_scratchpad" not in prompt.input_variables:
             raise ValueError(
@@ -71,6 +86,8 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
 
     @property
     def functions(self) -> List[dict]:
+        """Get functions."""
+
         return [dict(convert_to_openai_function(t)) for t in self.tools]
 
     def plan(
@@ -83,11 +100,16 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         """Given input, decided what to do.
 
         Args:
-            intermediate_steps: Steps the LLM has taken to date, along with observations
+            intermediate_steps: Steps the LLM has taken to date,
+                along with observations.
+            callbacks: Callbacks to use. Defaults to None.
+            with_functions: Whether to use functions. Defaults to True.
             **kwargs: User inputs.
 
         Returns:
             Action specifying what tool to use.
+            If the agent is finished, returns an AgentFinish.
+            If the agent is not finished, returns an AgentAction.
         """
         agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
         selected_inputs = {
@@ -116,15 +138,18 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> Union[AgentAction, AgentFinish]:
-        """Given input, decided what to do.
+        """Async given input, decided what to do.
 
         Args:
             intermediate_steps: Steps the LLM has taken to date,
-                along with observations
+                along with observations.
+            callbacks: Callbacks to use. Defaults to None.
             **kwargs: User inputs.
 
         Returns:
             Action specifying what tool to use.
+            If the agent is finished, returns an AgentFinish.
+            If the agent is not finished, returns an AgentAction.
         """
         agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
         selected_inputs = {
@@ -145,7 +170,20 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         intermediate_steps: List[Tuple[AgentAction, str]],
         **kwargs: Any,
     ) -> AgentFinish:
-        """Return response when agent has been stopped due to max iterations."""
+        """Return response when agent has been stopped due to max iterations.
+
+        Args:
+            early_stopping_method: The early stopping method to use.
+            intermediate_steps: Intermediate steps.
+            **kwargs: User inputs.
+
+        Returns:
+            AgentFinish.
+
+        Raises:
+            ValueError: If `early_stopping_method` is not `force` or `generate`.
+            ValueError: If `agent_decision` is not an AgentAction.
+        """
         if early_stopping_method == "force":
             # `force` just returns a constant string
             return AgentFinish(
@@ -215,7 +253,17 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         ),
         **kwargs: Any,
     ) -> BaseSingleActionAgent:
-        """Construct an agent from an LLM and tools."""
+        """Construct an agent from an LLM and tools.
+
+        Args:
+            llm: The LLM to use as the agent.
+            tools: The tools to use.
+            callback_manager: The callback manager to use. Defaults to None.
+            extra_prompt_messages: Extra prompt messages to use. Defaults to None.
+            system_message: The system message to use.
+                Defaults to a default system message.
+            **kwargs: Additional parameters to pass to the agent.
+        """
         prompt = cls.create_prompt(
             extra_prompt_messages=extra_prompt_messages,
             system_message=system_message,
@@ -243,8 +291,11 @@ def create_openai_functions_agent(
 
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
-        variables as the prompt passed in does. It returns as output either an
-        AgentAction or AgentFinish.
+        variables as the prompt passed in does. It returns as output either an
+        AgentAction or AgentFinish.
+
+    Raises:
+        ValueError: If `agent_scratchpad` is not in the prompt.
 
     Example:
 
langchain/agents/openai_functions_multi_agent/base.py CHANGED
@@ -1,4 +1,5 @@
 """Module implements an agent that uses OpenAI's APIs function enabled API."""
+
 import json
 from json import JSONDecodeError
 from typing import Any, List, Optional, Sequence, Tuple, Union
@@ -95,7 +96,7 @@ def _parse_ai_message(message: BaseMessage) -> Union[List[AgentAction], AgentFin
 
 @deprecated("0.1.0", alternative="create_openai_tools_agent", removal="0.3.0")
 class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
-    """An Agent driven by OpenAIs function powered API.
+    """Agent driven by OpenAIs function powered API.
 
     Args:
         llm: This should be an instance of ChatOpenAI, specifically a model
@@ -114,7 +115,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         """Get allowed tools."""
         return [t.name for t in self.tools]
 
-    @root_validator
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_prompt(cls, values: dict) -> dict:
         prompt: BasePromptTemplate = values["prompt"]
         if "agent_scratchpad" not in prompt.input_variables:
@@ -131,6 +132,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
 
     @property
     def functions(self) -> List[dict]:
+        """Get the functions for the agent."""
         enum_vals = [t.name for t in self.tools]
         tool_selection = {
             # OpenAI functions returns a single tool invocation
@@ -198,7 +200,9 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         """Given input, decided what to do.
 
         Args:
-            intermediate_steps: Steps the LLM has taken to date, along with observations
+            intermediate_steps: Steps the LLM has taken to date,
+                along with observations.
+            callbacks: Callbacks to use. Default is None.
             **kwargs: User inputs.
 
         Returns:
@@ -223,11 +227,12 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> Union[List[AgentAction], AgentFinish]:
-        """Given input, decided what to do.
+        """Async given input, decided what to do.
 
         Args:
             intermediate_steps: Steps the LLM has taken to date,
-                along with observations
+                along with observations.
+            callbacks: Callbacks to use. Default is None.
             **kwargs: User inputs.
 
         Returns:
@@ -260,7 +265,7 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
             system_message: Message to use as the system message that will be the
                 first in the prompt.
             extra_prompt_messages: Prompt messages that will be placed between the
-                system message and the new human input.
+                system message and the new human input. Default is None.
 
         Returns:
             A prompt template to pass into this agent.
@@ -293,7 +298,17 @@ class OpenAIMultiFunctionsAgent(BaseMultiActionAgent):
         ),
         **kwargs: Any,
     ) -> BaseMultiActionAgent:
-        """Construct an agent from an LLM and tools."""
+        """Construct an agent from an LLM and tools.
+
+        Args:
+            llm: The language model to use.
+            tools: A list of tools to use.
+            callback_manager: The callback manager to use. Default is None.
+            extra_prompt_messages: Extra prompt messages to use. Default is None.
+            system_message: The system message to use.
+                Default is a default system message.
+            **kwargs: Additional arguments.
+        """
         prompt = cls.create_prompt(
             extra_prompt_messages=extra_prompt_messages,
             system_message=system_message,
langchain/agents/openai_tools/base.py CHANGED
@@ -28,6 +28,9 @@ def create_openai_tools_agent(
         variables as the prompt passed in does. It returns as output either an
         AgentAction or AgentFinish.
 
+    Raises:
+        ValueError: If the prompt is missing required variables.
+
     Example:
 
         .. code-block:: python
langchain/agents/output_parsers/__init__.py CHANGED
@@ -9,6 +9,7 @@ This contains a `return_values` dictionary. This usually contains a
 single `output` key, but can be extended to contain more.
 This also contains a `log` variable (which contains a log of the agent's thinking).
 """
+
 from langchain.agents.output_parsers.json import JSONAgentOutputParser
 from langchain.agents.output_parsers.openai_functions import (
     OpenAIFunctionsAgentOutputParser,
langchain/agents/react/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
+
 from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any, List, Optional, Sequence
langchain/agents/self_ask_with_search/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that does self-ask with search."""
+
 from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any, Sequence, Union
langchain/agents/structured_chat/output_parser.py CHANGED
@@ -69,9 +69,9 @@ class StructuredChatOutputParserWithRetries(AgentOutputParser):
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         try:
             if self.output_fixing_parser is not None:
-                parsed_obj: Union[
-                    AgentAction, AgentFinish
-                ] = self.output_fixing_parser.parse(text)
+                parsed_obj: Union[AgentAction, AgentFinish] = (
+                    self.output_fixing_parser.parse(text)
+                )
             else:
                 parsed_obj = self.base_parser.parse(text)
             return parsed_obj
langchain/agents/tools.py CHANGED
@@ -1,4 +1,5 @@
 """Interface for tools."""
+
 from typing import List, Optional
 
 from langchain_core.callbacks import (
@@ -12,7 +13,9 @@ class InvalidTool(BaseTool):
     """Tool that is run when invalid tool name is encountered by agent."""
 
     name: str = "invalid_tool"
+    """Name of the tool."""
     description: str = "Called when tool name is invalid. Suggests valid tool names."
+    """Description of the tool."""
 
     def _run(
         self,
langchain/agents/utils.py CHANGED
@@ -4,7 +4,15 @@ from langchain_core.tools import BaseTool
 
 
 def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
-    """Validate tools for single input."""
+    """Validate tools for single input.
+
+    Args:
+        class_name: Name of the class.
+        tools: List of tools to validate.
+
+    Raises:
+        ValueError: If a multi-input tool is found in tools.
+    """
     for tool in tools:
         if not tool.is_single_input:
             raise ValueError(
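
Note: a small illustration of the ValueError documented above; the tool names and functions are invented for the example:

from langchain_core.tools import tool
from langchain.agents.utils import validate_tools_single_input

@tool
def echo(text: str) -> str:
    """Echo a single string (one input, so single-input)."""
    return text

@tool
def add(a: int, b: int) -> int:
    """Add two numbers (two inputs, so not single-input)."""
    return a + b

validate_tools_single_input("ZeroShotAgent", [echo])     # passes silently
try:
    validate_tools_single_input("ZeroShotAgent", [add])  # raises ValueError
except ValueError as err:
    print(err)
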
langchain/base_language.py CHANGED
@@ -1,4 +1,5 @@
 """Deprecated module for BaseLanguageModel class, kept for backwards compatibility."""
+
 from __future__ import annotations
 
 from langchain_core.language_models import BaseLanguageModel
langchain/callbacks/__init__.py CHANGED
@@ -6,6 +6,7 @@
 
     BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
 """
+
 from typing import TYPE_CHECKING, Any
 
 from langchain_core.callbacks import (
langchain/callbacks/base.py CHANGED
@@ -1,4 +1,5 @@
 """Base callback handler that can be used to handle callbacks in langchain."""
+
 from __future__ import annotations
 
 from langchain_core.callbacks import (
langchain/callbacks/streaming_stdout.py CHANGED
@@ -1,4 +1,5 @@
 """Callback Handler streams to stdout on new llm token."""
+
 from langchain_core.callbacks import StreamingStdOutCallbackHandler
 
 __all__ = ["StreamingStdOutCallbackHandler"]
langchain/callbacks/streaming_stdout_final_only.py CHANGED
@@ -1,4 +1,5 @@
 """Callback Handler streams to stdout on new llm token."""
+
 import sys
 from typing import Any, Dict, List, Optional
 
langchain/callbacks/tracers/evaluation.py CHANGED
@@ -1,4 +1,5 @@
 """A tracer that runs evaluators over completed runs."""
+
 from langchain_core.tracers.evaluation import (
     EvaluatorCallbackHandler,
     wait_for_all_evaluators,
langchain/chains/api/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that makes API calls and summarizes the responses to answer a question."""
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Optional, Sequence, Tuple
@@ -106,7 +107,7 @@ try:
             """
             return [self.output_key]
 
-        @root_validator(pre=True)
+        @root_validator(pre=False, skip_on_failure=True)
         def validate_api_request_prompt(cls, values: Dict) -> Dict:
             """Check that api request prompt expects the right variables."""
             input_vars = values["api_request_chain"].prompt.input_variables
@@ -120,6 +121,8 @@ try:
         @root_validator(pre=True)
         def validate_limit_to_domains(cls, values: Dict) -> Dict:
             """Check that allowed domains are valid."""
+            # This check must be a pre=True check, so that a default of None
+            # won't be set to limit_to_domains if it's not provided.
             if "limit_to_domains" not in values:
                 raise ValueError(
                     "You must specify a list of domains to limit access using "
@@ -135,7 +138,7 @@ try:
                 )
             return values
 
-        @root_validator(pre=True)
+        @root_validator(pre=False, skip_on_failure=True)
         def validate_api_answer_prompt(cls, values: Dict) -> Dict:
             """Check that api answer prompt expects the right variables."""
             input_vars = values["api_answer_chain"].prompt.input_variables
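
Note: the comment added to validate_limit_to_domains explains why that validator stays pre=True while its neighbours become post validators: a pre root validator sees the raw constructor input before field defaults are applied, so it can tell an omitted limit_to_domains apart from an explicit None. A minimal sketch with plain pydantic v1, not APIChain itself:

from typing import List, Optional
from pydantic.v1 import BaseModel, root_validator  # on pydantic 1.x: from pydantic import ...

class DomainLimitedChain(BaseModel):
    limit_to_domains: Optional[List[str]] = None

    @root_validator(pre=True)
    def require_limit_to_domains(cls, values: dict) -> dict:
        # Raw input: the key is simply absent when the caller never passed it.
        if "limit_to_domains" not in values:
            raise ValueError("You must specify limit_to_domains (a list, or None to disable).")
        return values

    @root_validator(pre=False, skip_on_failure=True)
    def check_domains(cls, values: dict) -> dict:
        # Post validation: defaults are already filled in, so the key always exists here.
        assert "limit_to_domains" in values
        return values

DomainLimitedChain(limit_to_domains=["https://example.com"])  # ok
DomainLimitedChain(limit_to_domains=None)                     # ok: explicitly disabled
try:
    DomainLimitedChain()                                      # rejected by the pre=True check
except Exception as err:
    print(err)
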
langchain/chains/base.py CHANGED
@@ -225,7 +225,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
     def _chain_type(self) -> str:
         raise NotImplementedError("Saving not supported for this chain type.")
 
-    @root_validator()
+    @root_validator(pre=True)
     def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:
         """Raise deprecation warning if callback_manager is used."""
         if values.get("callback_manager") is not None: