langchain 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (182)
  1. langchain/__init__.py +1 -0
  2. langchain/agents/__init__.py +5 -4
  3. langchain/agents/agent.py +272 -50
  4. langchain/agents/agent_iterator.py +20 -0
  5. langchain/agents/agent_toolkits/__init__.py +1 -0
  6. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  10. langchain/agents/agent_types.py +1 -0
  11. langchain/agents/chat/base.py +37 -1
  12. langchain/agents/chat/output_parser.py +14 -0
  13. langchain/agents/conversational/base.py +38 -6
  14. langchain/agents/conversational/output_parser.py +10 -0
  15. langchain/agents/conversational_chat/base.py +42 -3
  16. langchain/agents/format_scratchpad/__init__.py +1 -0
  17. langchain/agents/format_scratchpad/log.py +12 -1
  18. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  19. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  20. langchain/agents/format_scratchpad/tools.py +11 -7
  21. langchain/agents/initialize.py +15 -7
  22. langchain/agents/json_chat/base.py +6 -0
  23. langchain/agents/loading.py +7 -0
  24. langchain/agents/mrkl/base.py +39 -10
  25. langchain/agents/mrkl/output_parser.py +12 -0
  26. langchain/agents/openai_assistant/base.py +37 -14
  27. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  28. langchain/agents/openai_functions_agent/base.py +61 -10
  29. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  30. langchain/agents/openai_tools/base.py +3 -0
  31. langchain/agents/output_parsers/__init__.py +1 -0
  32. langchain/agents/react/base.py +1 -0
  33. langchain/agents/self_ask_with_search/base.py +1 -0
  34. langchain/agents/structured_chat/output_parser.py +3 -3
  35. langchain/agents/tools.py +3 -0
  36. langchain/agents/utils.py +9 -1
  37. langchain/base_language.py +1 -0
  38. langchain/callbacks/__init__.py +1 -0
  39. langchain/callbacks/base.py +1 -0
  40. langchain/callbacks/streaming_stdout.py +1 -0
  41. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  42. langchain/callbacks/tracers/evaluation.py +1 -0
  43. langchain/chains/api/base.py +5 -2
  44. langchain/chains/base.py +1 -1
  45. langchain/chains/combine_documents/base.py +59 -0
  46. langchain/chains/combine_documents/map_reduce.py +4 -2
  47. langchain/chains/combine_documents/map_rerank.py +5 -3
  48. langchain/chains/combine_documents/refine.py +4 -2
  49. langchain/chains/combine_documents/stuff.py +9 -4
  50. langchain/chains/constitutional_ai/base.py +1 -0
  51. langchain/chains/constitutional_ai/models.py +1 -0
  52. langchain/chains/constitutional_ai/principles.py +1 -0
  53. langchain/chains/conversation/base.py +81 -1
  54. langchain/chains/conversational_retrieval/base.py +2 -1
  55. langchain/chains/elasticsearch_database/base.py +2 -1
  56. langchain/chains/hyde/base.py +1 -0
  57. langchain/chains/llm.py +1 -0
  58. langchain/chains/llm_checker/base.py +4 -3
  59. langchain/chains/llm_math/base.py +1 -0
  60. langchain/chains/loading.py +2 -1
  61. langchain/chains/mapreduce.py +1 -0
  62. langchain/chains/moderation.py +1 -1
  63. langchain/chains/natbot/base.py +1 -0
  64. langchain/chains/openai_functions/base.py +1 -0
  65. langchain/chains/qa_generation/base.py +47 -1
  66. langchain/chains/qa_with_sources/__init__.py +1 -0
  67. langchain/chains/qa_with_sources/loading.py +1 -0
  68. langchain/chains/qa_with_sources/vector_db.py +1 -1
  69. langchain/chains/query_constructor/base.py +1 -0
  70. langchain/chains/query_constructor/ir.py +1 -0
  71. langchain/chains/question_answering/chain.py +1 -0
  72. langchain/chains/retrieval_qa/base.py +3 -2
  73. langchain/chains/router/base.py +1 -0
  74. langchain/chains/router/llm_router.py +2 -1
  75. langchain/chains/router/multi_prompt.py +1 -0
  76. langchain/chains/router/multi_retrieval_qa.py +1 -0
  77. langchain/chains/sequential.py +2 -1
  78. langchain/chains/structured_output/base.py +6 -6
  79. langchain/chains/summarize/chain.py +1 -0
  80. langchain/chains/transform.py +4 -3
  81. langchain/chat_models/__init__.py +1 -0
  82. langchain/chat_models/base.py +607 -9
  83. langchain/docstore/__init__.py +1 -0
  84. langchain/document_loaders/__init__.py +1 -0
  85. langchain/document_transformers/__init__.py +1 -0
  86. langchain/embeddings/__init__.py +0 -1
  87. langchain/evaluation/__init__.py +2 -1
  88. langchain/evaluation/agents/__init__.py +1 -0
  89. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  90. langchain/evaluation/comparison/__init__.py +1 -0
  91. langchain/evaluation/comparison/eval_chain.py +1 -0
  92. langchain/evaluation/comparison/prompt.py +1 -0
  93. langchain/evaluation/embedding_distance/__init__.py +1 -0
  94. langchain/evaluation/embedding_distance/base.py +1 -0
  95. langchain/evaluation/loading.py +1 -0
  96. langchain/evaluation/parsing/base.py +1 -0
  97. langchain/evaluation/qa/__init__.py +1 -0
  98. langchain/evaluation/qa/eval_chain.py +1 -0
  99. langchain/evaluation/qa/generate_chain.py +1 -0
  100. langchain/evaluation/schema.py +1 -0
  101. langchain/evaluation/scoring/__init__.py +1 -0
  102. langchain/evaluation/scoring/eval_chain.py +1 -0
  103. langchain/evaluation/scoring/prompt.py +1 -0
  104. langchain/evaluation/string_distance/__init__.py +1 -0
  105. langchain/example_generator.py +1 -0
  106. langchain/formatting.py +1 -0
  107. langchain/globals/__init__.py +1 -0
  108. langchain/graphs/__init__.py +1 -0
  109. langchain/indexes/__init__.py +1 -0
  110. langchain/indexes/_sql_record_manager.py +1 -2
  111. langchain/indexes/graph.py +1 -0
  112. langchain/indexes/prompts/__init__.py +1 -0
  113. langchain/input.py +1 -0
  114. langchain/llms/__init__.py +1 -0
  115. langchain/load/__init__.py +1 -0
  116. langchain/memory/__init__.py +5 -0
  117. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  118. langchain/output_parsers/__init__.py +1 -0
  119. langchain/prompts/__init__.py +1 -0
  120. langchain/prompts/example_selector/__init__.py +1 -0
  121. langchain/python.py +1 -0
  122. langchain/requests.py +1 -0
  123. langchain/retrievers/__init__.py +1 -0
  124. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  125. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  126. langchain/retrievers/ensemble.py +1 -0
  127. langchain/retrievers/self_query/base.py +7 -7
  128. langchain/schema/__init__.py +1 -0
  129. langchain/schema/runnable/__init__.py +1 -0
  130. langchain/serpapi.py +1 -0
  131. langchain/smith/__init__.py +6 -5
  132. langchain/smith/evaluation/__init__.py +0 -1
  133. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  134. langchain/sql_database.py +1 -0
  135. langchain/storage/__init__.py +1 -0
  136. langchain/storage/_lc_store.py +1 -0
  137. langchain/storage/in_memory.py +1 -0
  138. langchain/text_splitter.py +1 -0
  139. langchain/tools/__init__.py +1 -0
  140. langchain/tools/amadeus/__init__.py +1 -0
  141. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  142. langchain/tools/bing_search/__init__.py +1 -0
  143. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  144. langchain/tools/ddg_search/__init__.py +1 -0
  145. langchain/tools/edenai/__init__.py +1 -0
  146. langchain/tools/eleven_labs/__init__.py +1 -0
  147. langchain/tools/file_management/__init__.py +1 -0
  148. langchain/tools/github/__init__.py +1 -1
  149. langchain/tools/gitlab/__init__.py +1 -1
  150. langchain/tools/gmail/__init__.py +1 -0
  151. langchain/tools/golden_query/__init__.py +1 -0
  152. langchain/tools/google_cloud/__init__.py +1 -0
  153. langchain/tools/google_finance/__init__.py +1 -0
  154. langchain/tools/google_jobs/__init__.py +1 -0
  155. langchain/tools/google_lens/__init__.py +1 -0
  156. langchain/tools/google_places/__init__.py +1 -0
  157. langchain/tools/google_scholar/__init__.py +1 -0
  158. langchain/tools/google_search/__init__.py +1 -0
  159. langchain/tools/google_trends/__init__.py +1 -0
  160. langchain/tools/human/__init__.py +1 -0
  161. langchain/tools/memorize/__init__.py +1 -0
  162. langchain/tools/metaphor_search/__init__.py +1 -0
  163. langchain/tools/multion/__init__.py +1 -0
  164. langchain/tools/office365/__init__.py +1 -0
  165. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  166. langchain/tools/openweathermap/__init__.py +1 -0
  167. langchain/tools/playwright/__init__.py +1 -0
  168. langchain/tools/shell/__init__.py +1 -0
  169. langchain/tools/slack/__init__.py +1 -0
  170. langchain/tools/sql_database/prompt.py +1 -0
  171. langchain/tools/steamship_image_generation/__init__.py +1 -0
  172. langchain/tools/tavily_search/__init__.py +1 -0
  173. langchain/tools/wolfram_alpha/__init__.py +1 -0
  174. langchain/tools/zapier/__init__.py +1 -0
  175. langchain/utilities/__init__.py +1 -0
  176. langchain/utilities/python.py +1 -0
  177. langchain/vectorstores/__init__.py +1 -0
  178. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/METADATA +2 -3
  179. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/RECORD +182 -181
  180. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/LICENSE +0 -0
  181. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/WHEEL +0 -0
  182. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/entry_points.txt +0 -0

langchain/chat_models/base.py

@@ -1,34 +1,107 @@
+from __future__ import annotations
+
+import warnings
 from importlib import util
-from typing import Any, Optional
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Literal,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+    overload,
+)
 
 from langchain_core._api import beta
-from langchain_core.language_models.chat_models import (
+from langchain_core.language_models import (
     BaseChatModel,
+    LanguageModelInput,
     SimpleChatModel,
+)
+from langchain_core.language_models.chat_models import (
     agenerate_from_stream,
     generate_from_stream,
 )
+from langchain_core.messages import AnyMessage, BaseMessage
+from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.runnables import Runnable, RunnableConfig
+from langchain_core.runnables.schema import StreamEvent
+from langchain_core.tools import BaseTool
+from langchain_core.tracers import RunLog, RunLogPatch
+from typing_extensions import TypeAlias
 
 __all__ = [
+    "init_chat_model",
+    # For backwards compatibility
     "BaseChatModel",
     "SimpleChatModel",
     "generate_from_stream",
     "agenerate_from_stream",
-    "init_chat_model",
 ]
 
 
+@overload
+def init_chat_model(  # type: ignore[overload-overlap]
+    model: str,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Literal[None] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> BaseChatModel: ...
+
+
+@overload
+def init_chat_model(
+    model: Literal[None] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Literal[None] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> _ConfigurableModel: ...
+
+
+@overload
+def init_chat_model(
+    model: Optional[str] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = ...,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> _ConfigurableModel: ...
+
+
 # FOR CONTRIBUTORS: If adding support for a new provider, please append the provider
 # name to the supported list in the docstring below. Do *not* change the order of the
 # existing providers.
 @beta()
 def init_chat_model(
-    model: str, *, model_provider: Optional[str] = None, **kwargs: Any
-) -> BaseChatModel:
+    model: Optional[str] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Optional[
+        Union[Literal["any"], List[str], Tuple[str, ...]]
+    ] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> Union[BaseChatModel, _ConfigurableModel]:
     """Initialize a ChatModel from the model name and provider.
 
     Must have the integration package corresponding to the model provider installed.
 
+    .. versionadded:: 0.2.7
+
+    .. versionchanged:: 0.2.8
+
     Args:
         model: The name of the model, e.g. "gpt-4o", "claude-3-opus-20240229".
         model_provider: The model provider. Supported model_provider values and the
@@ -55,19 +128,43 @@ def init_chat_model(
             - gemini... -> google_vertexai
             - command... -> cohere
             - accounts/fireworks... -> fireworks
+        configurable_fields: Which model parameters are
+            configurable:
+            - None: No configurable fields.
+            - "any": All fields are configurable. *See Security Note below.*
+            - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
+
+            Fields are assumed to have config_prefix stripped if there is a
+            config_prefix. If model is specified, then defaults to None. If model is
+            not specified, then defaults to ``("model", "model_provider")``.
+
+            ***Security Note***: Setting ``configurable_fields="any"`` means fields like
+            api_key, base_url, etc. can be altered at runtime, potentially redirecting
+            model requests to a different service/user. Make sure that if you're
+            accepting untrusted configurations that you enumerate the
+            ``configurable_fields=(...)`` explicitly.
+
+        config_prefix: If config_prefix is a non-empty string then model will be
+            configurable at runtime via the
+            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
+            config_prefix is an empty string then model will be configurable via
+            ``config["configurable"]["{param}"]``.
         kwargs: Additional keyword args to pass to
             ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``.
 
     Returns:
-        The BaseChatModel corresponding to the model_name and model_provider specified.
+        A BaseChatModel corresponding to the model_name and model_provider specified if
+        configurability is inferred to be False. If configurable, a chat model emulator
+        that initializes the underlying model at runtime once a config is passed in.
 
     Raises:
         ValueError: If model_provider cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.
 
-    Example:
+    Initialize non-configurable models:
         .. code-block:: python
 
+            # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
             from langchain.chat_models import init_chat_model
 
             gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)
@@ -77,7 +174,125 @@ def init_chat_model(
             gpt_4o.invoke("what's your name")
             claude_opus.invoke("what's your name")
             gemini_15.invoke("what's your name")
+
+
+    Create a partially configurable model with no default model:
+        .. code-block:: python
+
+            # pip install langchain langchain-openai langchain-anthropic
+            from langchain.chat_models import init_chat_model
+
+            # We don't need to specify configurable=True if a model isn't specified.
+            configurable_model = init_chat_model(temperature=0)
+
+            configurable_model.invoke(
+                "what's your name",
+                config={"configurable": {"model": "gpt-4o"}}
+            )
+            # GPT-4o response
+
+            configurable_model.invoke(
+                "what's your name",
+                config={"configurable": {"model": "claude-3-5-sonnet-20240620"}}
+            )
+            # claude-3.5 sonnet response
+
+    Create a fully configurable model with a default model and a config prefix:
+        .. code-block:: python
+
+            # pip install langchain langchain-openai langchain-anthropic
+            from langchain.chat_models import init_chat_model
+
+            configurable_model_with_default = init_chat_model(
+                "gpt-4o",
+                model_provider="openai",
+                configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+                config_prefix="foo",
+                temperature=0
+            )
+
+            configurable_model_with_default.invoke("what's your name")
+            # GPT-4o response with temperature 0
+
+            configurable_model_with_default.invoke(
+                "what's your name",
+                config={
+                    "configurable": {
+                        "foo_model": "claude-3-5-sonnet-20240620",
+                        "foo_model_provider": "anthropic",
+                        "foo_temperature": 0.6
+                    }
+                }
+            )
+            # Claude-3.5 sonnet response with temperature 0.6
+
+    Bind tools to a configurable model:
+        You can call any ChatModel declarative methods on a configurable model in the
+        same way that you would with a normal model.
+
+        .. code-block:: python
+
+            # pip install langchain langchain-openai langchain-anthropic
+            from langchain.chat_models import init_chat_model
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+            class GetWeather(BaseModel):
+                '''Get the current weather in a given location'''
+
+                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+            class GetPopulation(BaseModel):
+                '''Get the current population in a given location'''
+
+                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+            configurable_model = init_chat_model(
+                "gpt-4o",
+                configurable_fields=("model", "model_provider"),
+                temperature=0
+            )
+
+            configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+            configurable_model_with_tools.invoke(
+                "Which city is hotter today and which is bigger: LA or NY?"
+            )
+            # GPT-4o response with tool calls
+
+            configurable_model_with_tools.invoke(
+                "Which city is hotter today and which is bigger: LA or NY?",
+                config={"configurable": {"model": "claude-3-5-sonnet-20240620"}}
+            )
+            # Claude-3.5 sonnet response with tools
     """  # noqa: E501
+    if not model and not configurable_fields:
+        configurable_fields = ("model", "model_provider")
+    config_prefix = config_prefix or ""
+    if config_prefix and not configurable_fields:
+        warnings.warn(
+            f"{config_prefix=} has been set but no fields are configurable. Set "
+            f"`configurable_fields=(...)` to specify the model params that are "
+            f"configurable."
+        )
+
+    if not configurable_fields:
+        return _init_chat_model_helper(
+            cast(str, model), model_provider=model_provider, **kwargs
+        )
+    else:
+        if model:
+            kwargs["model"] = model
+        if model_provider:
+            kwargs["model_provider"] = model_provider
+        return _ConfigurableModel(
+            default_config=kwargs,
+            config_prefix=config_prefix,
+            configurable_fields=configurable_fields,
+        )
+
+
+def _init_chat_model_helper(
+    model: str, *, model_provider: Optional[str] = None, **kwargs: Any
+) -> BaseChatModel:
     model_provider = model_provider or _attempt_infer_model_provider(model)
     if not model_provider:
         raise ValueError(
@@ -94,7 +309,7 @@ def init_chat_model(
         _check_pkg("langchain_anthropic")
         from langchain_anthropic import ChatAnthropic
 
-        return ChatAnthropic(model=model, **kwargs)
+        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg]
     elif model_provider == "azure_openai":
         _check_pkg("langchain_openai")
         from langchain_openai import AzureChatOpenAI
@@ -134,7 +349,7 @@ def init_chat_model(
         _check_pkg("langchain_mistralai")
         from langchain_mistralai import ChatMistralAI
 
-        return ChatMistralAI(model=model, **kwargs)
+        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg]
     elif model_provider == "huggingface":
        _check_pkg("langchain_huggingface")
        from langchain_huggingface import ChatHuggingFace
@@ -200,3 +415,386 @@ def _check_pkg(pkg: str) -> None:
             f"Unable to import {pkg_kebab}. Please install with "
             f"`pip install -U {pkg_kebab}`"
         )
+
+
+def _remove_prefix(s: str, prefix: str) -> str:
+    if s.startswith(prefix):
+        s = s[len(prefix) :]
+    return s
+
+
+_DECLARATIVE_METHODS = ("bind_tools", "with_structured_output")
+
+
+class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
+    def __init__(
+        self,
+        *,
+        default_config: Optional[dict] = None,
+        configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = "any",
+        config_prefix: str = "",
+        queued_declarative_operations: Sequence[Tuple[str, Tuple, Dict]] = (),
+    ) -> None:
+        self._default_config: dict = default_config or {}
+        self._configurable_fields: Union[Literal["any"], List[str]] = (
+            configurable_fields
+            if configurable_fields == "any"
+            else list(configurable_fields)
+        )
+        self._config_prefix = (
+            config_prefix + "_"
+            if config_prefix and not config_prefix.endswith("_")
+            else config_prefix
+        )
+        self._queued_declarative_operations: List[Tuple[str, Tuple, Dict]] = list(
+            queued_declarative_operations
+        )
+
+    def __getattr__(self, name: str) -> Any:
+        if name in _DECLARATIVE_METHODS:
+            # Declarative operations that cannot be applied until after an actual model
+            # object is instantiated. So instead of returning the actual operation,
+            # we record the operation and its arguments in a queue. This queue is
+            # then applied in order whenever we actually instantiate the model (in
+            # self._model()).
+            def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
+                queued_declarative_operations = list(
+                    self._queued_declarative_operations
+                )
+                queued_declarative_operations.append((name, args, kwargs))
+                return _ConfigurableModel(
+                    default_config=dict(self._default_config),
+                    configurable_fields=list(self._configurable_fields)
+                    if isinstance(self._configurable_fields, list)
+                    else self._configurable_fields,
+                    config_prefix=self._config_prefix,
+                    queued_declarative_operations=queued_declarative_operations,
+                )
+
+            return queue
+        elif self._default_config and (model := self._model()) and hasattr(model, name):
+            return getattr(model, name)
+        else:
+            msg = f"{name} is not a BaseChatModel attribute"
+            if self._default_config:
+                msg += " and is not implemented on the default model"
+            msg += "."
+            raise AttributeError(msg)
+
+    def _model(self, config: Optional[RunnableConfig] = None) -> Runnable:
+        params = {**self._default_config, **self._model_params(config)}
+        model = _init_chat_model_helper(**params)
+        for name, args, kwargs in self._queued_declarative_operations:
+            model = getattr(model, name)(*args, **kwargs)
+        return model
+
+    def _model_params(self, config: Optional[RunnableConfig]) -> dict:
+        config = config or {}
+        model_params = {
+            _remove_prefix(k, self._config_prefix): v
+            for k, v in config.get("configurable", {}).items()
+            if k.startswith(self._config_prefix)
+        }
+        if self._configurable_fields != "any":
+            model_params = {
+                k: v for k, v in model_params.items() if k in self._configurable_fields
+            }
+        return model_params
+
+    def with_config(
+        self,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> _ConfigurableModel:
+        """Bind config to a Runnable, returning a new Runnable."""
+        config = RunnableConfig(**(config or {}), **cast(RunnableConfig, kwargs))
+        model_params = self._model_params(config)
+        remaining_config = {k: v for k, v in config.items() if k != "configurable"}
+        remaining_config["configurable"] = {
+            k: v
+            for k, v in config.get("configurable", {}).items()
+            if _remove_prefix(k, self._config_prefix) not in model_params
+        }
+        queued_declarative_operations = list(self._queued_declarative_operations)
+        if remaining_config:
+            queued_declarative_operations.append(
+                ("with_config", (), {"config": remaining_config})
+            )
+        return _ConfigurableModel(
+            default_config={**self._default_config, **model_params},
+            configurable_fields=list(self._configurable_fields)
+            if isinstance(self._configurable_fields, list)
+            else self._configurable_fields,
+            config_prefix=self._config_prefix,
+            queued_declarative_operations=queued_declarative_operations,
+        )
+
+    @property
+    def InputType(self) -> TypeAlias:
+        """Get the input type for this runnable."""
+        from langchain_core.prompt_values import (
+            ChatPromptValueConcrete,
+            StringPromptValue,
+        )
+
+        # This is a version of LanguageModelInput which replaces the abstract
+        # base class BaseMessage with a union of its subclasses, which makes
+        # for a much better schema.
+        return Union[
+            str,
+            Union[StringPromptValue, ChatPromptValueConcrete],
+            List[AnyMessage],
+        ]
+
+    def invoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> Any:
+        return self._model(config).invoke(input, config=config, **kwargs)
+
+    async def ainvoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> Any:
+        return await self._model(config).ainvoke(input, config=config, **kwargs)
+
+    def stream(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Iterator[Any]:
+        yield from self._model(config).stream(input, config=config, **kwargs)
+
+    async def astream(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> AsyncIterator[Any]:
+        async for x in self._model(config).astream(input, config=config, **kwargs):
+            yield x
+
+    def batch(
+        self,
+        inputs: List[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Optional[Any],
+    ) -> List[Any]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            return self._model(config).batch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            return super().batch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    async def abatch(
+        self,
+        inputs: List[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Optional[Any],
+    ) -> List[Any]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            return await self._model(config).abatch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            return await super().abatch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    def batch_as_completed(
+        self,
+        inputs: Sequence[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Any,
+    ) -> Iterator[Tuple[int, Union[Any, Exception]]]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            yield from self._model(cast(RunnableConfig, config)).batch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            yield from super().batch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    async def abatch_as_completed(
+        self,
+        inputs: Sequence[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Any,
+    ) -> AsyncIterator[Tuple[int, Any]]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            async for x in self._model(
+                cast(RunnableConfig, config)
+            ).abatch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            ):
+                yield x
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            async for x in super().abatch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            ):
+                yield x
+
+    def transform(
+        self,
+        input: Iterator[LanguageModelInput],
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Iterator[Any]:
+        for x in self._model(config).transform(input, config=config, **kwargs):
+            yield x
+
+    async def atransform(
+        self,
+        input: AsyncIterator[LanguageModelInput],
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> AsyncIterator[Any]:
+        async for x in self._model(config).atransform(input, config=config, **kwargs):
+            yield x
+
+    @overload
+    def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: Literal[True] = True,
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunLogPatch]: ...
+
+    @overload
+    def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: Literal[False],
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunLog]: ...
+
+    async def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: bool = True,
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]:
+        async for x in self._model(config).astream_log(  # type: ignore[call-overload, misc]
+            input,
+            config=config,
+            diff=diff,
+            with_streamed_output_list=with_streamed_output_list,
+            include_names=include_names,
+            include_types=include_types,
+            include_tags=include_tags,
+            exclude_tags=exclude_tags,
+            exclude_types=exclude_types,
+            exclude_names=exclude_names,
+            **kwargs,
+        ):
+            yield x
+
+    async def astream_events(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        version: Literal["v1", "v2"],
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[StreamEvent]:
+        async for x in self._model(config).astream_events(
+            input,
+            config=config,
+            version=version,
+            include_names=include_names,
+            include_types=include_types,
+            include_tags=include_tags,
+            exclude_tags=exclude_tags,
+            exclude_types=exclude_types,
+            exclude_names=exclude_names,
+            **kwargs,
+        ):
+            yield x
+
+    # Explicitly added to satisfy downstream linters.
+    def bind_tools(
+        self,
+        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        **kwargs: Any,
+    ) -> Runnable[LanguageModelInput, BaseMessage]:
+        return self.__getattr__("bind_tools")(tools, **kwargs)
+
+    # Explicitly added to satisfy downstream linters.
+    def with_structured_output(
+        self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any
+    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+        return self.__getattr__("with_structured_output")(schema, **kwargs)
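
As a reading aid (not part of the diff): a minimal, self-contained sketch of the config-routing behavior that _remove_prefix and _ConfigurableModel._model_params above implement. Runtime parameters are read from config["configurable"], the optional prefix is stripped, and, unless configurable_fields is "any", only the allowed fields survive. The demo_model_params helper name is hypothetical and used for illustration only.

    from typing import List, Literal, Union


    def _remove_prefix(s: str, prefix: str) -> str:
        # Same helper as in the diff above: strip the prefix if present.
        if s.startswith(prefix):
            s = s[len(prefix) :]
        return s


    def demo_model_params(
        config: dict,
        config_prefix: str = "",
        configurable_fields: Union[Literal["any"], List[str]] = "any",
    ) -> dict:
        # Mirrors _ConfigurableModel._model_params: read config["configurable"],
        # strip the "<prefix>_" prefix (appended in __init__ in the real class),
        # then filter to the allowed fields unless configurable_fields == "any".
        prefix = config_prefix + "_" if config_prefix else ""
        params = {
            _remove_prefix(k, prefix): v
            for k, v in config.get("configurable", {}).items()
            if k.startswith(prefix)
        }
        if configurable_fields != "any":
            params = {k: v for k, v in params.items() if k in configurable_fields}
        return params


    cfg = {"configurable": {"foo_model": "claude-3-5-sonnet-20240620", "foo_temperature": 0.6}}
    assert demo_model_params(cfg, config_prefix="foo") == {
        "model": "claude-3-5-sonnet-20240620",
        "temperature": 0.6,
    }
    assert demo_model_params(cfg, "foo", ["model"]) == {"model": "claude-3-5-sonnet-20240620"}

This is why, in the docstring example above, config_prefix="foo" turns the foo_model and foo_temperature configurable keys into the model and temperature kwargs of the underlying chat model.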

langchain/docstore/__init__.py

@@ -14,6 +14,7 @@ The **Docstore** is a simplified version of the Document Loader.
 
     Document, AddableMixin
 """
+
 from typing import TYPE_CHECKING, Any
 
 from langchain._api import create_importer

langchain/document_loaders/__init__.py

@@ -14,6 +14,7 @@
 
     Document, <name>TextSplitter
 """
+
 from typing import TYPE_CHECKING, Any
 
 from langchain._api import create_importer