langchain 0.3.7__py3-none-any.whl → 0.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -7,7 +7,20 @@ from langchain_core._api.deprecation import (
     warn_deprecated,
 )
 
+AGENT_DEPRECATION_WARNING = (
+    "LangChain agents will continue to be supported, but it is recommended for new "
+    "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+    "full-featured framework for building agents, including support for "
+    "tool-calling, persistence of state, and human-in-the-loop workflows. See "
+    "LangGraph documentation for more details: "
+    "https://langchain-ai.github.io/langgraph/. Refer here for its pre-built "
+    "ReAct agent: "
+    "https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/"
+)
+
+
 __all__ = [
+    "AGENT_DEPRECATION_WARNING",
     "LangChainDeprecationWarning",
     "LangChainPendingDeprecationWarning",
     "deprecated",
langchain/agents/agent.py CHANGED
@@ -47,6 +47,7 @@ from langchain_core.utils.input import get_color_mapping
 from pydantic import BaseModel, ConfigDict, model_validator
 from typing_extensions import Self
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent_iterator import AgentExecutorIterator
 from langchain.agents.agent_types import AgentType
 from langchain.agents.tools import InvalidTool
@@ -633,10 +634,7 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class LLMSingleActionAgent(BaseSingleActionAgent):
@@ -724,10 +722,7 @@ class LLMSingleActionAgent(BaseSingleActionAgent):
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class Agent(BaseSingleActionAgent):
@@ -20,6 +20,10 @@ from langchain.chains.llm import LLMChain
     since="0.2.13",
     removal="1.0",
     message=(
+        "This function will continue to be supported, but it is recommended for new "
+        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+        "full-featured framework for building agents, including support for "
+        "tool-calling, persistence of state, and human-in-the-loop workflows. "
         "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html "  # noqa: E501
        "Read more here on how to create agents that query vector stores: "
@@ -109,6 +113,10 @@ def create_vectorstore_agent(
     since="0.2.13",
     removal="1.0",
     message=(
+        "This function will continue to be supported, but it is recommended for new "
+        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+        "full-featured framework for building agents, including support for "
+        "tool-calling, persistence of state, and human-in-the-loop workflows. "
        "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html "  # noqa: E501
        "Read more here on how to create agents that query vector stores: "
@@ -4,13 +4,12 @@ from enum import Enum
 
 from langchain_core._api import deprecated
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
+
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class AgentType(str, Enum):
@@ -13,6 +13,7 @@ from langchain_core.prompts.chat import (
 from langchain_core.tools import BaseTool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.chat.output_parser import ChatOutputParser
 from langchain.agents.chat.prompt import (
@@ -25,7 +26,11 @@ from langchain.agents.utils import validate_tools_single_input
 from langchain.chains.llm import LLMChain
 
 
-@deprecated("0.1.0", alternative="create_react_agent", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ChatAgent(Agent):
     """Chat Agent."""
 
@@ -11,6 +11,7 @@ from langchain_core.prompts import PromptTemplate
 from langchain_core.tools import BaseTool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.conversational.output_parser import ConvoOutputParser
@@ -19,7 +20,11 @@ from langchain.agents.utils import validate_tools_single_input
 from langchain.chains import LLMChain
 
 
-@deprecated("0.1.0", alternative="create_react_agent", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ConversationalAgent(Agent):
     """An agent that holds a conversation in addition to using tools."""
 
@@ -7,6 +7,7 @@ from langchain_core.callbacks import BaseCallbackManager
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.tools import BaseTool
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import AgentExecutor
 from langchain.agents.agent_types import AgentType
 from langchain.agents.loading import AGENT_TO_CLASS, load_agent
@@ -14,10 +15,7 @@ from langchain.agents.loading import AGENT_TO_CLASS, load_agent
 
 @deprecated(
     "0.1.0",
-    alternative=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 def initialize_agent(
@@ -12,6 +12,7 @@ from langchain_core.tools import BaseTool, Tool
 from langchain_core.tools.render import render_text_description
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.mrkl.output_parser import MRKLOutputParser
@@ -34,7 +35,11 @@ class ChainConfig(NamedTuple):
     action_description: str
 
 
-@deprecated("0.1.0", alternative="create_react_agent", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ZeroShotAgent(Agent):
     """Agent for the MRKL chain.
 
@@ -168,7 +173,11 @@ class ZeroShotAgent(Agent):
         super()._validate_tools(tools)
 
 
-@deprecated("0.1.0", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class MRKLChain(AgentExecutor):
     """Chain that implements the MRKL system."""
 
@@ -9,7 +9,7 @@ from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
 
 FINAL_ANSWER_ACTION = "Final Answer:"
 MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
-    "Invalid Format: Missing 'Action:' after 'Thought:"
+    "Invalid Format: Missing 'Action:' after 'Thought:'"
 )
 MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
     "Invalid Format: Missing 'Action Input:' after 'Action:'"
@@ -11,6 +11,7 @@ from langchain_core.prompts import BasePromptTemplate
 from langchain_core.tools import BaseTool, Tool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.react.output_parser import ReActOutputParser
@@ -22,7 +23,11 @@ if TYPE_CHECKING:
     from langchain_community.docstore.base import Docstore
 
 
-@deprecated("0.1.0", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActDocstoreAgent(Agent):
     """Agent for the ReAct chain."""
 
@@ -69,7 +74,11 @@ class ReActDocstoreAgent(Agent):
         return "Thought:"
 
 
-@deprecated("0.1.0", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class DocstoreExplorer:
     """Class to assist with exploration of a document store."""
 
@@ -119,7 +128,11 @@ class DocstoreExplorer:
         return self.document.page_content.split("\n\n")
 
 
-@deprecated("0.1.0", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActTextWorldAgent(ReActDocstoreAgent):
     """Agent for the ReAct TextWorld chain."""
 
@@ -139,7 +152,11 @@ class ReActTextWorldAgent(ReActDocstoreAgent):
         raise ValueError(f"Tool name should be Play, got {tool_names}")
 
 
-@deprecated("0.1.0", removal="1.0")
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActChain(AgentExecutor):
     """[Deprecated] Chain that implements the ReAct paper."""
 
@@ -198,7 +198,9 @@ try:
        api_docs: str
        question_key: str = "question"  #: :meta private:
        output_key: str = "output"  #: :meta private:
-        limit_to_domains: Optional[Sequence[str]] = Field(default_factory=list)
+        limit_to_domains: Optional[Sequence[str]] = Field(
+            default_factory=list  # type: ignore
+        )
        """Use to limit the domains that can be accessed by the API chain.
 
        * For example, to limit to just the domain `https://www.example.com`, set
@@ -28,10 +28,10 @@ from langchain.chains.llm import LLMChain
     since="0.2.13",
     removal="1.0",
     message=(
-        "Refer here for a recommended map-reduce implementation using langgraph: "
-        "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/. See also "
-        "migration guide: "
-        "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"  # noqa: E501
+        "Refer to migration guide here for a recommended implementation using "
+        "LangGraph: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"  # noqa: E501
+        ". See also LangGraph guides for map-reduce: "
+        "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/."
     ),
 )
 class MapReduceChain(Chain):
@@ -38,7 +38,7 @@ class OpenAIModerationChain(Chain):
     output_key: str = "output"  #: :meta private:
     openai_api_key: Optional[str] = None
     openai_organization: Optional[str] = None
-    openai_pre_1_0: bool = Field(default=None)
+    openai_pre_1_0: bool = Field(default=False)
 
     @model_validator(mode="before")
     @classmethod
@@ -6,7 +6,9 @@ import warnings
 from typing import Any, Dict, List, Optional
 
 from langchain_core._api import deprecated
+from langchain_core.caches import BaseCache as BaseCache
 from langchain_core.callbacks import CallbackManagerForChainRun
+from langchain_core.callbacks import Callbacks as Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import Runnable
@@ -156,3 +158,6 @@ class NatBotChain(Chain):
     @property
     def _chain_type(self) -> str:
         return "nat_bot_chain"
+
+
+NatBotChain.model_rebuild()
@@ -20,9 +20,8 @@ from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMP
     since="0.2.12",
     removal="1.0",
     message=(
-        "Use RunnableLambda to select from multiple prompt templates. See example "
-        "in API reference: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html"  # noqa: E501
+        "Please see migration guide here for recommended implementation: "
+        "https://python.langchain.com/docs/versions/migrating_chains/multi_prompt_chain/"  # noqa: E501
     ),
 )
 class MultiPromptChain(MultiRouteChain):
@@ -37,60 +36,109 @@ class MultiPromptChain(MultiRouteChain):
 
         from operator import itemgetter
         from typing import Literal
-        from typing_extensions import TypedDict
 
         from langchain_core.output_parsers import StrOutputParser
         from langchain_core.prompts import ChatPromptTemplate
-        from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+        from langchain_core.runnables import RunnableConfig
         from langchain_openai import ChatOpenAI
+        from langgraph.graph import END, START, StateGraph
+        from typing_extensions import TypedDict
 
         llm = ChatOpenAI(model="gpt-4o-mini")
 
+        # Define the prompts we will route to
         prompt_1 = ChatPromptTemplate.from_messages(
             [
                 ("system", "You are an expert on animals."),
-                ("human", "{query}"),
+                ("human", "{input}"),
             ]
         )
         prompt_2 = ChatPromptTemplate.from_messages(
             [
                 ("system", "You are an expert on vegetables."),
-                ("human", "{query}"),
+                ("human", "{input}"),
             ]
         )
 
+        # Construct the chains we will route to. These format the input query
+        # into the respective prompt, run it through a chat model, and cast
+        # the result to a string.
         chain_1 = prompt_1 | llm | StrOutputParser()
         chain_2 = prompt_2 | llm | StrOutputParser()
 
+
+        # Next: define the chain that selects which branch to route to.
+        # Here we will take advantage of tool-calling features to force
+        # the output to select one of two desired branches.
         route_system = "Route the user's query to either the animal or vegetable expert."
         route_prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", route_system),
-                ("human", "{query}"),
+                ("human", "{input}"),
             ]
         )
 
 
+        # Define schema for output:
         class RouteQuery(TypedDict):
-            \"\"\"Route query to destination.\"\"\"
+            \"\"\"Route query to destination expert.\"\"\"
+
             destination: Literal["animal", "vegetable"]
 
 
-        route_chain = (
-            route_prompt
-            | llm.with_structured_output(RouteQuery)
-            | itemgetter("destination")
-        )
+        route_chain = route_prompt | llm.with_structured_output(RouteQuery)
 
-        chain = {
-            "destination": route_chain,  # "animal" or "vegetable"
-            "query": lambda x: x["query"],  # pass through input query
-        } | RunnableLambda(
-            # if animal, chain_1. otherwise, chain_2.
-            lambda x: chain_1 if x["destination"] == "animal" else chain_2,
-        )
 
-        chain.invoke({"query": "what color are carrots"})
+        # For LangGraph, we will define the state of the graph to hold the query,
+        # destination, and final answer.
+        class State(TypedDict):
+            query: str
+            destination: RouteQuery
+            answer: str
+
+
+        # We define functions for each node, including routing the query:
+        async def route_query(state: State, config: RunnableConfig):
+            destination = await route_chain.ainvoke(state["query"], config)
+            return {"destination": destination}
+
+
+        # And one node for each prompt
+        async def prompt_1(state: State, config: RunnableConfig):
+            return {"answer": await chain_1.ainvoke(state["query"], config)}
+
+
+        async def prompt_2(state: State, config: RunnableConfig):
+            return {"answer": await chain_2.ainvoke(state["query"], config)}
+
+
+        # We then define logic that selects the prompt based on the classification
+        def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
+            if state["destination"] == "animal":
+                return "prompt_1"
+            else:
+                return "prompt_2"
+
+
+        # Finally, assemble the multi-prompt chain. This is a sequence of two steps:
+        # 1) Select "animal" or "vegetable" via the route_chain, and collect the answer
+        #    alongside the input query.
+        # 2) Route the input query to chain_1 or chain_2, based on the
+        #    selection.
+        graph = StateGraph(State)
+        graph.add_node("route_query", route_query)
+        graph.add_node("prompt_1", prompt_1)
+        graph.add_node("prompt_2", prompt_2)
+
+        graph.add_edge(START, "route_query")
+        graph.add_conditional_edges("route_query", select_node)
+        graph.add_edge("prompt_1", END)
+        graph.add_edge("prompt_2", END)
+        app = graph.compile()
+
+        result = await app.ainvoke({"query": "what color are carrots"})
+        print(result["destination"])
+        print(result["answer"])
     """  # noqa: E501
 
     @property
@@ -149,7 +149,16 @@ def init_chat_model(
            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
            config_prefix is an empty string then model will be configurable via
            ``config["configurable"]["{param}"]``.
-        kwargs: Additional keyword args to pass to
+        temperature: Model temperature.
+        max_tokens: Max output tokens.
+        timeout: The maximum time (in seconds) to wait for a response from the model
+            before canceling the request.
+        max_retries: The maximum number of attempts the system will make to resend a
+            request if it fails due to issues like network timeouts or rate limits.
+        base_url: The URL of the API endpoint where requests are sent.
+        rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding
+            rate limits.
+        kwargs: Additional model-specific keyword args to pass to
            ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``.
 
    Returns:
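
For illustration only, a minimal sketch of a call that exercises the parameters documented above; the model name and values are arbitrary examples, not taken from this diff, and they are simply forwarded to the selected provider class:

    from langchain.chat_models import init_chat_model

    # Sketch: temperature, max_tokens, timeout and max_retries are passed
    # through to the underlying chat model (ChatOpenAI in this case).
    llm = init_chat_model(
        "gpt-4o-mini",
        model_provider="openai",
        temperature=0,
        max_tokens=512,
        timeout=30,
        max_retries=2,
    )
    llm.invoke("Hello")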
@@ -319,13 +328,7 @@ def init_chat_model(
 def _init_chat_model_helper(
     model: str, *, model_provider: Optional[str] = None, **kwargs: Any
 ) -> BaseChatModel:
-    model_provider = model_provider or _attempt_infer_model_provider(model)
-    if not model_provider:
-        raise ValueError(
-            f"Unable to infer model provider for {model=}, please specify "
-            f"model_provider directly."
-        )
-    model_provider = model_provider.replace("-", "_").lower()
+    model, model_provider = _parse_model(model, model_provider)
     if model_provider == "openai":
         _check_pkg("langchain_openai")
         from langchain_openai import ChatOpenAI
@@ -452,6 +455,24 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
     return None
 
 
+def _parse_model(model: str, model_provider: Optional[str]) -> Tuple[str, str]:
+    if (
+        not model_provider
+        and ":" in model
+        and model.split(":")[0] in _SUPPORTED_PROVIDERS
+    ):
+        model_provider = model.split(":")[0]
+        model = ":".join(model.split(":")[1:])
+    model_provider = model_provider or _attempt_infer_model_provider(model)
+    if not model_provider:
+        raise ValueError(
+            f"Unable to infer model provider for {model=}, please specify "
+            f"model_provider directly."
+        )
+    model_provider = model_provider.replace("-", "_").lower()
+    return model, model_provider
+
+
 def _check_pkg(pkg: str) -> None:
     if not util.find_spec(pkg):
         pkg_kebab = pkg.replace("_", "-")
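
With _parse_model in place, a provider-prefixed model string is split on the first colon before dispatch, so the prefix selects the provider when it matches a supported one. A short sketch of the resulting call pattern (the model name is illustrative):

    from langchain.chat_models import init_chat_model

    # "openai:gpt-4o-mini" is split into provider "openai" and model "gpt-4o-mini";
    # this is equivalent to init_chat_model("gpt-4o-mini", model_provider="openai").
    llm = init_chat_model("openai:gpt-4o-mini")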
@@ -14,6 +14,7 @@ import logging
 from typing import TYPE_CHECKING, Any
 
 from langchain._api import create_importer
+from langchain.embeddings.base import init_embeddings
 from langchain.embeddings.cache import CacheBackedEmbeddings
 
 if TYPE_CHECKING:
@@ -221,4 +222,5 @@ __all__ = [
     "VertexAIEmbeddings",
     "VoyageEmbeddings",
     "XinferenceEmbeddings",
+    "init_embeddings",
 ]
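
Since init_embeddings is re-exported here, it can be imported directly from langchain.embeddings. A minimal sketch, using the model string from the examples in the new module (requires the langchain-openai package and an OpenAI API key):

    from langchain.embeddings import init_embeddings

    embedder = init_embeddings("openai:text-embedding-3-small")
    vector = embedder.embed_query("Hello, world!")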
@@ -1,4 +1,224 @@
+import functools
+from importlib import util
+from typing import Any, List, Optional, Tuple, Union
+
+from langchain_core._api import beta
 from langchain_core.embeddings import Embeddings
+from langchain_core.runnables import Runnable
+
+_SUPPORTED_PROVIDERS = {
+    "azure_openai": "langchain_openai",
+    "bedrock": "langchain_aws",
+    "cohere": "langchain_cohere",
+    "google_vertexai": "langchain_google_vertexai",
+    "huggingface": "langchain_huggingface",
+    "mistralai": "langchain_mistralai",
+    "openai": "langchain_openai",
+}
+
+
+def _get_provider_list() -> str:
+    """Get formatted list of providers and their packages."""
+    return "\n".join(
+        f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items()
+    )
+
+
+def _parse_model_string(model_name: str) -> Tuple[str, str]:
+    """Parse a model string into provider and model name components.
+
+    The model string should be in the format 'provider:model-name', where provider
+    is one of the supported providers.
+
+    Args:
+        model_name: A model string in the format 'provider:model-name'
+
+    Returns:
+        A tuple of (provider, model_name)
+
+    .. code-block:: python
+
+        _parse_model_string("openai:text-embedding-3-small")
+        # Returns: ("openai", "text-embedding-3-small")
+
+        _parse_model_string("bedrock:amazon.titan-embed-text-v1")
+        # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+
+    Raises:
+        ValueError: If the model string is not in the correct format or
+            the provider is unsupported
+    """
+    if ":" not in model_name:
+        providers = _SUPPORTED_PROVIDERS
+        raise ValueError(
+            f"Invalid model format '{model_name}'.\n"
+            f"Model name must be in format 'provider:model-name'\n"
+            f"Example valid model strings:\n"
+            f" - openai:text-embedding-3-small\n"
+            f" - bedrock:amazon.titan-embed-text-v1\n"
+            f" - cohere:embed-english-v3.0\n"
+            f"Supported providers: {providers}"
+        )
+
+    provider, model = model_name.split(":", 1)
+    provider = provider.lower().strip()
+    model = model.strip()
+
+    if provider not in _SUPPORTED_PROVIDERS:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+    if not model:
+        raise ValueError("Model name cannot be empty")
+    return provider, model
+
+
+def _infer_model_and_provider(
+    model: str, *, provider: Optional[str] = None
+) -> Tuple[str, str]:
+    if not model.strip():
+        raise ValueError("Model name cannot be empty")
+    if provider is None and ":" in model:
+        provider, model_name = _parse_model_string(model)
+    else:
+        provider = provider
+        model_name = model
+
+    if not provider:
+        providers = _SUPPORTED_PROVIDERS
+        raise ValueError(
+            "Must specify either:\n"
+            "1. A model string in format 'provider:model-name'\n"
+            "   Example: 'openai:text-embedding-3-small'\n"
+            "2. Or explicitly set provider from: "
+            f"{providers}"
+        )
+
+    if provider not in _SUPPORTED_PROVIDERS:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+    return provider, model_name
+
+
+@functools.lru_cache(maxsize=len(_SUPPORTED_PROVIDERS))
+def _check_pkg(pkg: str) -> None:
+    """Check if a package is installed."""
+    if not util.find_spec(pkg):
+        raise ImportError(
+            f"Could not import {pkg} python package. "
+            f"Please install it with `pip install {pkg}`"
+        )
+
+
+@beta()
+def init_embeddings(
+    model: str,
+    *,
+    provider: Optional[str] = None,
+    **kwargs: Any,
+) -> Union[Embeddings, Runnable[Any, List[float]]]:
+    """Initialize an embeddings model from a model name and optional provider.
+
+    **Note:** Must have the integration package corresponding to the model provider
+    installed.
+
+    Args:
+        model: Name of the model to use. Can be either:
+            - A model string like "openai:text-embedding-3-small"
+            - Just the model name if provider is specified
+        provider: Optional explicit provider name. If not specified,
+            will attempt to parse from the model string. Supported providers
+            and their required packages:
+
+            {_get_provider_list()}
+
+        **kwargs: Additional model-specific parameters passed to the embedding model.
+            These vary by provider, see the provider-specific documentation for details.
+
+    Returns:
+        An Embeddings instance that can generate embeddings for text.
+
+    Raises:
+        ValueError: If the model provider is not supported or cannot be determined
+        ImportError: If the required provider package is not installed
+
+    .. dropdown:: Example Usage
+        :open:
+
+        .. code-block:: python
+
+            # Using a model string
+            model = init_embeddings("openai:text-embedding-3-small")
+            model.embed_query("Hello, world!")
+
+            # Using explicit provider
+            model = init_embeddings(
+                model="text-embedding-3-small",
+                provider="openai"
+            )
+            model.embed_documents(["Hello, world!", "Goodbye, world!"])
+
+            # With additional parameters
+            model = init_embeddings(
+                "openai:text-embedding-3-small",
+                api_key="sk-..."
+            )
+
+    .. versionadded:: 0.3.9
+    """
+    if not model:
+        providers = _SUPPORTED_PROVIDERS.keys()
+        raise ValueError(
+            "Must specify model name. "
+            f"Supported providers are: {', '.join(providers)}"
+        )
+
+    provider, model_name = _infer_model_and_provider(model, provider=provider)
+    pkg = _SUPPORTED_PROVIDERS[provider]
+    _check_pkg(pkg)
+
+    if provider == "openai":
+        from langchain_openai import OpenAIEmbeddings
+
+        return OpenAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "azure_openai":
+        from langchain_openai import AzureOpenAIEmbeddings
+
+        return AzureOpenAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "google_vertexai":
+        from langchain_google_vertexai import VertexAIEmbeddings
+
+        return VertexAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "bedrock":
+        from langchain_aws import BedrockEmbeddings
+
+        return BedrockEmbeddings(model_id=model_name, **kwargs)
+    elif provider == "cohere":
+        from langchain_cohere import CohereEmbeddings
+
+        return CohereEmbeddings(model=model_name, **kwargs)
+    elif provider == "mistralai":
+        from langchain_mistralai import MistralAIEmbeddings
+
+        return MistralAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "huggingface":
+        from langchain_huggingface import HuggingFaceEmbeddings
+
+        return HuggingFaceEmbeddings(model_name=model_name, **kwargs)
+    else:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+
 
-# This is for backwards compatibility
-__all__ = ["Embeddings"]
+__all__ = [
+    "init_embeddings",
+    "Embeddings",  # This one is for backwards compatibility
+]
@@ -3,6 +3,8 @@ from __future__ import annotations
 from typing import Any, Dict, List, Type
 
 from langchain_core._api import deprecated
+from langchain_core.caches import BaseCache as BaseCache  # For model_rebuild
+from langchain_core.callbacks import Callbacks as Callbacks  # For model_rebuild
 from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
@@ -131,3 +133,6 @@ class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
         """Clear memory contents."""
         super().clear()
         self.buffer = ""
+
+
+ConversationSummaryMemory.model_rebuild()
@@ -109,7 +109,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
     previous_history_template: str = DEFAULT_HISTORY_TEMPLATE
     split_chunk_size: int = 1000
 
-    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)
+    _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)  # type: ignore
     _timestamps: List[datetime] = PrivateAttr(default_factory=list)
 
     @property
@@ -27,11 +27,12 @@ class OutputFixingParser(BaseOutputParser[T]):
     def is_lc_serializable(cls) -> bool:
         return True
 
-    parser: Annotated[BaseOutputParser[T], SkipValidation()]
+    parser: Annotated[Any, SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[
-        RunnableSerializable[OutputFixingParserRetryChainInput, str], Any
+    retry_chain: Annotated[
+        Union[RunnableSerializable[OutputFixingParserRetryChainInput, str], Any],
+        SkipValidation(),
     ]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
@@ -57,7 +57,10 @@ class RetryOutputParser(BaseOutputParser[T]):
     parser: Annotated[BaseOutputParser[T], SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any]
+    retry_chain: Annotated[
+        Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any],
+        SkipValidation(),
+    ]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
     """The maximum number of times to retry the parse."""
@@ -187,8 +190,11 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
     parser: Annotated[BaseOutputParser[T], SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[
-        RunnableSerializable[RetryWithErrorOutputParserRetryChainInput, str], Any
+    retry_chain: Annotated[
+        Union[
+            RunnableSerializable[RetryWithErrorOutputParserRetryChainInput, str], Any
+        ],
+        SkipValidation(),
     ]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
@@ -75,7 +75,7 @@ class EmbeddingsFilter(BaseDocumentCompressor):
         )
         embedded_query = self.embeddings.embed_query(query)
         similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
-        included_idxs = np.arange(len(embedded_documents))
+        included_idxs: np.ndarray = np.arange(len(embedded_documents))
         if self.k is not None:
             included_idxs = np.argsort(similarity)[::-1][: self.k]
         if self.similarity_threshold is not None:
@@ -110,7 +110,7 @@ class EmbeddingsFilter(BaseDocumentCompressor):
         )
         embedded_query = await self.embeddings.aembed_query(query)
         similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
-        included_idxs = np.arange(len(embedded_documents))
+        included_idxs: np.ndarray = np.arange(len(embedded_documents))
         if self.k is not None:
             included_idxs = np.argsort(similarity)[::-1][: self.k]
         if self.similarity_threshold is not None:
@@ -161,6 +161,14 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
     if isinstance(vectorstore, MongoDBAtlasVectorSearch):
         return MongoDBAtlasTranslator()
 
+    try:
+        from langchain_neo4j import Neo4jVector
+    except ImportError:
+        pass
+    else:
+        if isinstance(vectorstore, Neo4jVector):
+            return Neo4jTranslator()
+
     try:
         from langchain_chroma import Chroma
     except ImportError:
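
For context, a rough sketch of a self-query setup that would now resolve the Neo4j translator; it assumes the langchain-neo4j and langchain-openai packages, an existing Neo4j vector index, and placeholder index/metadata names:

    from langchain.chains.query_constructor.base import AttributeInfo
    from langchain.retrievers.self_query.base import SelfQueryRetriever
    from langchain_neo4j import Neo4jVector
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings

    # Connection details are assumed to come from NEO4J_URI / NEO4J_USERNAME /
    # NEO4J_PASSWORD environment variables; "movies" and "year" are placeholders.
    vectorstore = Neo4jVector.from_existing_index(
        OpenAIEmbeddings(), index_name="movies"
    )
    retriever = SelfQueryRetriever.from_llm(
        llm=ChatOpenAI(model="gpt-4o-mini"),
        vectorstore=vectorstore,
        document_contents="Brief summaries of movies",
        metadata_field_info=[
            AttributeInfo(name="year", description="Release year", type="integer")
        ],
    )
    # _get_builtin_translator now returns a Neo4jTranslator for this vector store.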
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.3.7
+Version: 0.3.11
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,15 +11,16 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.3.15,<0.4.0)
+Requires-Dist: langchain-core (>=0.3.24,<0.4.0)
 Requires-Dist: langchain-text-splitters (>=0.3.0,<0.4.0)
-Requires-Dist: langsmith (>=0.1.17,<0.2.0)
-Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
-Requires-Dist: numpy (>=1.26.0,<2.0.0) ; python_version >= "3.12"
+Requires-Dist: langsmith (>=0.1.17,<0.3)
+Requires-Dist: numpy (>=1.22.4,<2) ; python_version < "3.12"
+Requires-Dist: numpy (>=1.26.2,<3) ; python_version >= "3.12"
 Requires-Dist: pydantic (>=2.7.4,<3.0.0)
 Requires-Dist: requests (>=2,<3)
 Requires-Dist: tenacity (>=8.1.0,!=8.4.0,<10)
@@ -1,13 +1,13 @@
 langchain/__init__.py,sha256=4cqV-N_QJnfjk52DqtR2e72vsmJC1R6PkflvRdLjZQI,13709
 langchain/_api/__init__.py,sha256=0FuHuMNUBMrst1Y1nm5yZzQr2xbLmb7rxMsimqKBXhs,733
-langchain/_api/deprecation.py,sha256=MpH4S7a11UDuoAGCv1RLWGn4pwhoFwEOrtONJGep40U,471
+langchain/_api/deprecation.py,sha256=GuafNmTTzxJ8eveGcVrPx9pmeu1Oe_d2cTrvOIZYuvo,1082
 langchain/_api/interactive_env.py,sha256=NlnXizhm1TG3l_qKNI0qHJiHkh9q2jRjt5zGJsg_BCA,139
 langchain/_api/module_import.py,sha256=q6UZ1WadWx7curQq8HV8nGwt9WmKd7tJ0mipUyG7ll0,6347
 langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
 langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
 langchain/agents/__init__.py,sha256=JQJ3VlqRMRpHbjR-pkzy1yowJkdEmsQEPXTptkyHc-o,6282
-langchain/agents/agent.py,sha256=OyOGrmxzZ2Cirn3hB74P5Jen1o1Lcnat0pHtYHcuRT0,62383
+langchain/agents/agent.py,sha256=-DP3U47kQGphkOYYcXDS8k90xOTkR3hCMLZ1mE_RAgE,62216
 langchain/agents/agent_iterator.py,sha256=Zr0aikktn-aotTvoaVGJxXIBIkHidIQGkfQIKvovkxg,16454
 langchain/agents/agent_toolkits/__init__.py,sha256=N0ylx2gzwaOqaoHRXQs9jvYNIzrnTM-2rgjNkCU5UII,7370
 langchain/agents/agent_toolkits/ainetwork/__init__.py,sha256=henfKntuAEjG1KoN-Hk1IHy3fFGCYPWLEuZtF2bIdZI,25
@@ -74,19 +74,19 @@ langchain/agents/agent_toolkits/sql/toolkit.py,sha256=CCVWRJKVuECq-eFRjatJjYsy81
 langchain/agents/agent_toolkits/steam/__init__.py,sha256=iOMgxWCt0FTNLMNq0wScgSN_YdBBq-56VM6j0Ud8GpI,21
 langchain/agents/agent_toolkits/steam/toolkit.py,sha256=V0_xpO4mC4rfWBaLyTPW-pKwd-EScTTUnvgtB1sW6Cw,659
 langchain/agents/agent_toolkits/vectorstore/__init__.py,sha256=uT5qVHjIcx3yFkWfxOzbRKL5xwWcMuFGQ-es9O7b2NQ,56
-langchain/agents/agent_toolkits/vectorstore/base.py,sha256=lnzst1sehVH8FExMPF8Q1TFg9BROmfp5uHRzE2LB7Mo,8462
+langchain/agents/agent_toolkits/vectorstore/base.py,sha256=nahqycVjELCoK8psm49wf3pXmxyNaJUM8FJ5dH8GmXw,9122
 langchain/agents/agent_toolkits/vectorstore/prompt.py,sha256=DndLnLxi9iKjuYKo5E1nscHCOPeCoNcpl8dFHcSltxU,834
 langchain/agents/agent_toolkits/vectorstore/toolkit.py,sha256=dJhQ-0bfPOSliIFickPnJ40iefUPUyL3uV2pVaYP4pA,3210
 langchain/agents/agent_toolkits/xorbits/__init__.py,sha256=LJ-yZ3UKg4vjibzbgMXocR03vcsU_7ZvU7TlScM9RlE,1095
 langchain/agents/agent_toolkits/zapier/__init__.py,sha256=19Hc7HG8DzQfg83qqEbYiXA5FklLoRAEOfIs9JqTjX8,22
 langchain/agents/agent_toolkits/zapier/toolkit.py,sha256=BcFOzvckA9ZBz8HTeWUPFc_eIeifE3fIGE5RBSb7Yls,670
-langchain/agents/agent_types.py,sha256=6OjV-ZClMEaSqjAnv-7wUBG-gEGhhmjp_VsfKR8QWmQ,1943
+langchain/agents/agent_types.py,sha256=b6WCaXUAXi6CK9vDaKRgGaOM5VIiUI7I5pHEdO2lRCM,1893
 langchain/agents/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain/agents/chat/base.py,sha256=m1HPKGB-W-pVNioqX1l1QF6pC8BS_skiJoa1uurHPfo,6509
+langchain/agents/chat/base.py,sha256=AfUJZB-zsj9sSQ0HK1CiHYODPcZJlEygw-KaVAZCK3s,6590
 langchain/agents/chat/output_parser.py,sha256=0GRXvbNl18xqfSVHzA614pxVBuentIn--vC_QjFctoA,2367
 langchain/agents/chat/prompt.py,sha256=4Ub4oZyIKmJRpWwxOyGcYwlyoK8jJ0kR60jW0lPspC8,1158
 langchain/agents/conversational/__init__.py,sha256=TnMfDzoRzR-xCiR6ph3tn3H7OPbBPpuTsFuqkLMzjiA,75
-langchain/agents/conversational/base.py,sha256=K9E4iWsFnyeOqZTgbSp8ncHnRhvtbY5YaTaXf4LINtc,6237
+langchain/agents/conversational/base.py,sha256=2raEsfYQ5kw0voUSUF09UjJuSWRgkCYkXvGxznxS7hw,6318
 langchain/agents/conversational/output_parser.py,sha256=OXFq_96ASiAVgz-Ra0UYO_ZxAIDSWaAWEKrXQlHIgVc,1610
 langchain/agents/conversational/prompt.py,sha256=6eiZYQT9liZQr30wAhoqP_2Unph7i-qSqTWqfqdMijI,1859
 langchain/agents/conversational_chat/__init__.py,sha256=TnMfDzoRzR-xCiR6ph3tn3H7OPbBPpuTsFuqkLMzjiA,75
@@ -100,14 +100,14 @@ langchain/agents/format_scratchpad/openai_functions.py,sha256=LtIroeeK_SQaxx3yAt
 langchain/agents/format_scratchpad/openai_tools.py,sha256=vyBEqvIZ5HCradWWg0weg4bj9R3nr-CpGZqvSua9HnE,166
 langchain/agents/format_scratchpad/tools.py,sha256=nyp_Z9sTnS6FLXSUfAEeZUxhpXcBLck52kdSz0Kas7I,1932
 langchain/agents/format_scratchpad/xml.py,sha256=DtMBd2-Rgi2LdfxXNImYYNcCEy5lxk8ix7-SSCOpWQY,578
-langchain/agents/initialize.py,sha256=7X-pR92W2sruOYlnJJnos4EmtaR0g_ZHSPAc510V_go,3629
+langchain/agents/initialize.py,sha256=n2mWPXV_ZyTbmUVq4yLSP4prOvXQbFBCau-7doGprGQ,3574
 langchain/agents/json_chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/json_chat/base.py,sha256=V4jl4Vt_WxTotmSw8Py4rQU9GMnhHpXYC5x-wtRinxg,7994
 langchain/agents/json_chat/prompt.py,sha256=gZukOH50C1llQ-AB2QvtL-PSrczv-a-gJLIPYP8z6vA,551
 langchain/agents/load_tools.py,sha256=uMi1EZtkv2sgyUw6iXMNlCSZlIaju0Rw2svwMtkeW3E,286
 langchain/agents/loading.py,sha256=WRE-hsYnjnv1QPW91Sh9GNIJmVzcMOB-8b6YgmSwqmA,4814
 langchain/agents/mrkl/__init__.py,sha256=Gpz8w88wAF4GSXoGnuYOwZY5rhjFL5WGZvTVQa-YJas,86
-langchain/agents/mrkl/base.py,sha256=GnWny1MWbfms9b3tF18nyuejzC5NHwDxJbXQBsXxrSw,7046
+langchain/agents/mrkl/base.py,sha256=yonYGfgMkTixmrknWROMjwjddiUCgmWEkfIaWVlJdAU,7177
 langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
 langchain/agents/mrkl/prompt.py,sha256=2dTMP2lAWiLvCtuEijgQRjbKDlbPEnmx77duMwdJ7e4,641
 langchain/agents/openai_assistant/__init__.py,sha256=Xssaqoxrix3hn1gKSOLmDRQzTxAoJk0ProGXmXQe8Mw,114
@@ -124,13 +124,13 @@ langchain/agents/output_parsers/json.py,sha256=sW9e8fG4VlPnMn53dWIwSgnyRBUYs4ULF
 langchain/agents/output_parsers/openai_functions.py,sha256=MjNEFVCxYgS6Efr3HX4rR1zoks2vJxoV8FCUa240jPQ,3467
 langchain/agents/output_parsers/openai_tools.py,sha256=A_GpcYqy3xnkKrlBtrmHIUWwwLMyaKwWc8R-gEvRV3s,2317
 langchain/agents/output_parsers/react_json_single_input.py,sha256=SUkOGmdGGzxB4e1CNJD1eo4dJneiMYsgfGVHpxZ5bfI,2473
-langchain/agents/output_parsers/react_single_input.py,sha256=lIHosxNep1YFCgW9h71gEDWs59dmGeWlWedl9gWf11k,3218
+langchain/agents/output_parsers/react_single_input.py,sha256=nIdieCfXKXpk-CzqvVmAQS0SBrdFS1gKb9ngVeCVYjA,3219
 langchain/agents/output_parsers/self_ask.py,sha256=-4_-hQbKB1ichR5odEyeYUV-wIdLmP5eGDxzw77Cop4,1545
 langchain/agents/output_parsers/tools.py,sha256=go3kYKW406Wi8tR8Oqy6YGD26Ab-9PHQtTQ1FHScHA0,3779
 langchain/agents/output_parsers/xml.py,sha256=2MjxW4nAM4sZN-in3K40_K5DBx6cI2Erb0TZbpSoZIY,1658
 langchain/agents/react/__init__.py,sha256=9RIjjaUDfWnoMEMpV57JQ0CwZZC5Soh357QdKpVIM-4,76
 langchain/agents/react/agent.py,sha256=TWjUeto0zrhf3YtKZ9NoM-kfOu4VR0ircrST_8HLAaM,5568
-langchain/agents/react/base.py,sha256=Rypd4p-ew5vGePBjo7vNVcxrcLEHn1D9_YZFKPWEXrA,5773
+langchain/agents/react/base.py,sha256=26burAhSYnELPwiOCcz24VMSslJm0TlZDiiUbbR6Nv4,6038
 langchain/agents/react/output_parser.py,sha256=bEL3U3mxYGK7_7Lm4GlOq8JKQTgyHFQQIEVUUZjV1qs,1231
 langchain/agents/react/textworld_prompt.py,sha256=b9WDM8pFmqrfAWJ8n6zkxlPlxQI5oHljZ1R9g5y6cRE,1906
 langchain/agents/react/wiki_prompt.py,sha256=iQxqKo5IjsP9manfQwf5sz038Hv_hZH_CMWHtAZYKNM,6127
@@ -201,7 +201,7 @@ langchain/callbacks/wandb_callback.py,sha256=mWcDRVTlUnzQGhN2BMiGhPsKw5uyB2qDQ_L
 langchain/callbacks/whylabs_callback.py,sha256=N36XACtHYNgFSSYrNbfXiZ4nxSdwSrIE5e6xwxukrPc,688
 langchain/chains/__init__.py,sha256=xsRWTwsP3mTejfnKTzsTKRwpYT5xthXZAde30M_118U,5092
 langchain/chains/api/__init__.py,sha256=d8xBEQqFVNOMTm4qXNz5YiYkvA827Ayyd4XCG1KP-z4,84
-langchain/chains/api/base.py,sha256=OBdwr1T1ll4D3uSmEPjSf5jMugwTqM_aeZbGmiracZI,15221
+langchain/chains/api/base.py,sha256=hUSXqxtQbNm_9G4Nofsv6WEldjhhv9ByjSdhCt1G-V8,15259
 langchain/chains/api/news_docs.py,sha256=9vzx5nSPwe_cjFV8cemlfMp4EX8wiZe2eXBuRik2Vdg,2452
 langchain/chains/api/open_meteo_docs.py,sha256=8pLSX24K37lcgq3jmgfThcuiz7WY3zkub_V6dtsqc18,3399
 langchain/chains/api/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -279,10 +279,10 @@ langchain/chains/llm_summarization_checker/prompts/create_facts.txt,sha256=hM2_E
 langchain/chains/llm_summarization_checker/prompts/revise_summary.txt,sha256=nSSq5UQMx6gvjMKIs2t_ituuEQzu2nni1wdnywAe-5U,416
 langchain/chains/llm_symbolic_math/__init__.py,sha256=KQ6bFiFMsqs8PNtU-oo6l-czNBBwQUn2rEirz3gt-w8,470
 langchain/chains/loading.py,sha256=57shFurz0r_FDoUSTcD5Hv7cZl4Rr2G2A_gT-p7XHCE,28829
-langchain/chains/mapreduce.py,sha256=90P4QcNczv22qye9JCuOptR9ze-0qIFotb6bOS6HX8g,4092
-langchain/chains/moderation.py,sha256=ezUrzTOI6uGynyglpSuGom2gK26bKtkER2UuMG4yJWQ,4427
+langchain/chains/mapreduce.py,sha256=1Sjrnu21VaRtfAGQB-Mf-ssbsv3vk5-mXThwIq1IHTA,4117
+langchain/chains/moderation.py,sha256=HqOo7_ySVVg1NNbP7Qknaz3CzxNujoYL7ms2qavo0HY,4428
 langchain/chains/natbot/__init__.py,sha256=ACF2TYNK_CTfvmdLlG5Ry0_j9D6ZfjgfQxmeKe1BAIg,96
-langchain/chains/natbot/base.py,sha256=zl_sf4dgS8dFRYTY83cdaMXq1oqEsB-ddlb7RHx5SUM,5286
+langchain/chains/natbot/base.py,sha256=pS4NHgEHDjqiHRcyxzNgrFrUG56tW8nQ7BOmxjvoe6c,5433
 langchain/chains/natbot/crawler.py,sha256=E1mQUEsg8Jj6Eth-LBUcMU-Zc88JEA3a79kMhHkKO08,16050
 langchain/chains/natbot/prompt.py,sha256=zB95SYLG5_12ABFFGDtDi8vVP9DSdPoP8UCjrar_4TI,4989
 langchain/chains/openai_functions/__init__.py,sha256=o8B_I98nFTlFPkF6FPpLyt8pU3EfEPHADHr9xY5V1O0,1489
@@ -327,7 +327,7 @@ langchain/chains/router/__init__.py,sha256=r66J28FWIORVB5QIZ1d8R_HsiBaV1eQMZDZvM
 langchain/chains/router/base.py,sha256=ws6i8C4nk7YWmBqkXBcJ-FybNx4OeDJE-L1IELLK3M4,4517
 langchain/chains/router/embedding_router.py,sha256=hR5hOuwBdMBo_U3lo9SSwBfnVACR0ZpNc-nmDpei5hw,3069
 langchain/chains/router/llm_router.py,sha256=6FQUTXvZ9pekVkPeTNvQsj1jD9JdmfpMkxIPMe4oTMU,6994
-langchain/chains/router/multi_prompt.py,sha256=pqBIW8fLH6esLd9uiuGS5aJeXuuPTQcv7jaOHU8YhSE,4887
+langchain/chains/router/multi_prompt.py,sha256=lLpJsYShzRBnvwtV3AaBbUcB8x6sK1PSxqDveCSC65A,6994
 langchain/chains/router/multi_prompt_prompt.py,sha256=T8UbIuxblnI6Byhw-BMAzwQcbB5ww3N6BiMqMJxS6Jc,1156
 langchain/chains/router/multi_retrieval_prompt.py,sha256=VUYGLWbwGiv03aSMW5sjdGNwsEa9FKgq0RcK5o3lkH4,1079
 langchain/chains/router/multi_retrieval_qa.py,sha256=tjIhHEbOwtF3CLq0qQ8Kd78ao5BXRKZLsm9UlmHrdtQ,4254
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
 langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
 langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
 langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
-langchain/chat_models/base.py,sha256=Y8cqyWUy_wK1XqPF3C6Ini93vz3U63UA2FGHni1YZXU,31650
+langchain/chat_models/base.py,sha256=DL0OTo4ro35oyrVLGzRcce5bF34k32C788mUZ4lFEcE,32616
 langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
 langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
 langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
@@ -572,12 +572,12 @@ langchain/document_transformers/long_context_reorder.py,sha256=uRPWoCzkRvS-rp6L3
 langchain/document_transformers/nuclia_text_transform.py,sha256=IIg8LuX116M_PrnqQE7RwN_gy45BBs1UTONtujFEMeU,678
 langchain/document_transformers/openai_functions.py,sha256=UfhBLrya4MRRNWofT87qRKcZI27J8UjZTX9gn005jEA,929
 langchain/document_transformers/xsl/html_chunks_with_headers.xslt,sha256=ti9sT_zWqZQf0aaeX5zT6tfHT1CuUpAVCvzoZWutE0o,6033
-langchain/embeddings/__init__.py,sha256=-DhjrerMIQyCaktSOsGqkf5lWc8zqpWZGunxDdkd-6w,8313
+langchain/embeddings/__init__.py,sha256=9f7XVRZhOkU1ME7ZV-ODihjMYrEN32EP2kGdXoCRDHI,8390
 langchain/embeddings/aleph_alpha.py,sha256=_yTqGDHsHbh83Zp0MjJ497ilIxkEJm5ccmxOWbJJay4,890
 langchain/embeddings/awa.py,sha256=1cnMiwKKU3ml3Zz5s5WIpcZSlYNVFFGCaeJilrxN8HE,626
 langchain/embeddings/azure_openai.py,sha256=tmICp-NOrxoVFENBy4F_0-c0l3znf8bOtBBo-UZhajg,650
 langchain/embeddings/baidu_qianfan_endpoint.py,sha256=w7BeE53d7o9Y8Xf0cZntmmziih7oBJcmF-jBW70KJlc,662
-langchain/embeddings/base.py,sha256=1f9mAt3_kGWFWZVrt2H_6VXzPdbe8910YtJYKoYmoJs,113
+langchain/embeddings/base.py,sha256=YOZ4S2qmbnJ889P3mzvSD_2uQdr3a-U5QMhHIXU8Fo0,7385
 langchain/embeddings/bedrock.py,sha256=tCBm3vcN0B21Ga6KvNwhgJpgjobC2VEcmPApUmwXO4E,638
 langchain/embeddings/bookend.py,sha256=qWaQXZw9Gq11kEdfIO71h1H0NaXqVKm45TiStxd2xaM,638
 langchain/embeddings/cache.py,sha256=69qxrvD4S5gtQvzv72a4sP9cES-KE3fH908C1XRDIDI,10187
@@ -804,12 +804,12 @@ langchain/memory/motorhead_memory.py,sha256=OXjtlAQi1ioRXdM3GVcYmReynkKn8Vm1e5Tr
 langchain/memory/prompt.py,sha256=r8vxZSRydSOWJzRszStN0Wky4n3fyM_QJ2XoKMsP3JA,8181
 langchain/memory/readonly.py,sha256=IbZFbyuPo_bHEzyACQcLIcOPpczoX5CLfM_n0YllYjw,792
 langchain/memory/simple.py,sha256=7El81OHJA0HBqwJ-AZDTQFPfB7B5NEsmY_fEOrwD0XA,761
-langchain/memory/summary.py,sha256=KS6V7eD2hCsX5vtLUO20VbVVFprkXhGAFxVFEiiEIaA,4503
+langchain/memory/summary.py,sha256=LHf8a59eQKFFGujcmpRnzeUf7J1Fjv9qy-a9CLwN6S4,4706
 langchain/memory/summary_buffer.py,sha256=ynYbCa-XEjFeYcVIwyjsiOShWyLj6v1sDmurdv1kGUM,5514
 langchain/memory/token_buffer.py,sha256=jYtua6S5M6R2KyElsqXc8VRuGNsu7YVpavINj91HfGg,2556
 langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
 langchain/memory/vectorstore.py,sha256=RdOX2EDSFXAC6LEE_9aYWIJcVoZ32lUQuludOgPCAoc,4189
-langchain/memory/vectorstore_token_buffer_memory.py,sha256=CSuatQSOEs7iKeMBhKLUqDvNrdl12lquvC89q9_NlXo,7602
+langchain/memory/vectorstore_token_buffer_memory.py,sha256=73GYFp_hExF1IRc6xFTOYU4lLdQAp0cvig6858OAJVQ,7618
 langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
 langchain/model_laboratory.py,sha256=IaJzVG_SbFX7W6ODriqqme-Q5x0MB18j4Bhg1Y-fWLo,3278
 langchain/output_parsers/__init__.py,sha256=A9fDuB-lYuOIN8QbDx-fULqSwugB7saLRKD23gdaIl4,2720
@@ -818,7 +818,7 @@ langchain/output_parsers/combining.py,sha256=tBQx3lVAz4YL52unRsRGofAgQPFbIgDU8Mn
 langchain/output_parsers/datetime.py,sha256=zxhwax0YxVahE3CCHMXTqjpyzQcffgZ9J0NA0qLL0_8,1974
 langchain/output_parsers/enum.py,sha256=VrkErkDrW6JEiIOjw18J0D4p_BU0p59pUcb7W1sRLbk,1267
 langchain/output_parsers/ernie_functions.py,sha256=86DsYlAGncjRalnmw5ZGwhH80lP2ms6zaw8PJGC3m3Q,1427
-langchain/output_parsers/fix.py,sha256=TuKf5pH3wNO_kqJClz5RxPUH-szPvp899zRhTeq7wdE,5568
+langchain/output_parsers/fix.py,sha256=QAra1xRX1fgEOkahlrCanT_3gYPq1waRtwJ0jBVLme8,5590
 langchain/output_parsers/format_instructions.py,sha256=y5oSpjwzgmvYRNhfe0JmKHHdFZZP65L2snJI6xcMXEY,3958
 langchain/output_parsers/json.py,sha256=2FJL7uLd7pHgvpQm-r5XDyt9S1ZZ9mlJUW8ilQAQ0k4,340
 langchain/output_parsers/list.py,sha256=D35r0U51Xy5wHn-VcWxr97Ftul4UqszmyLetDi4syYQ,310
@@ -831,7 +831,7 @@ langchain/output_parsers/pydantic.py,sha256=uxbrfdyPnZxfdDvmuDr3QOmBFMwML3SfMDEm
 langchain/output_parsers/rail_parser.py,sha256=iHmX3ux2jE2k0MsLqe5XCrJ1eQOBBfZtRbRzQoYPTfU,691
 langchain/output_parsers/regex.py,sha256=TAkxKzxRQQ810LuXbxYatwLZgsYhoVwez3j5e2P55bA,1230
 langchain/output_parsers/regex_dict.py,sha256=UK6iL4Hx-q6UlPNEGLAnbh7_8-IwtXY2V1-_KicG1Z8,1725
-langchain/output_parsers/retry.py,sha256=QPLKiY5uSU8QcnfW067qYVCWqvaktsH5ulR0bo0qgoM,10457
+langchain/output_parsers/retry.py,sha256=OebVl-COlUP5FbAtPvHjXCWYYyQagj4z0wURwZRyWqM,10569
 langchain/output_parsers/structured.py,sha256=R38VNhDr-xD9zM30q71h31ApZofi9UaAkMW7xCz6S2U,3147
 langchain/output_parsers/xml.py,sha256=WDHazWjxO-nDAzxkBJrd1tGINVrzo4mH2-Qgqtz9Y2w,93
 langchain/output_parsers/yaml.py,sha256=jKxg4cBFF6LCfoIexu9Q4M4LX7MQzb7QbMRT4_bZ5Y0,2409
@@ -875,7 +875,7 @@ langchain/retrievers/document_compressors/chain_filter_prompt.py,sha256=FTQRPiEs
 langchain/retrievers/document_compressors/cohere_rerank.py,sha256=uo9rRozAvgLM9sUEcE929SnnTvE3CHBDiEhf_S0UufQ,4508
 langchain/retrievers/document_compressors/cross_encoder.py,sha256=_Z7SoPSfOUSk-rNIHX2lQgYV0TgVMKf3F9AnTH7EFiM,393
 langchain/retrievers/document_compressors/cross_encoder_rerank.py,sha256=ThgVrX8NeXFzE4eoftBoa1yz-sBJiDb-JISQa9Hep2k,1542
-langchain/retrievers/document_compressors/embeddings_filter.py,sha256=8gIQY88ycf5BMRtulwedTWLkwAGp5kMEHR_nhXZz1Ms,5193
+langchain/retrievers/document_compressors/embeddings_filter.py,sha256=_04uA8wOw5Eb5rzlu-6rLqxi9u7kqeD8t4xd9VsB_PA,5217
 langchain/retrievers/document_compressors/flashrank_rerank.py,sha256=Eo86fJ_T2IbEEeCkI_5rb3Ao4gsdenv-_Ukt33MuMko,709
 langchain/retrievers/document_compressors/listwise_rerank.py,sha256=i3dCqXBF27_sHPGxWOlCkVjt4s85QM0ikHZtPp2LpDs,5127
 langchain/retrievers/elastic_search_bm25.py,sha256=eRboOkRQj-_E53gUQIZzxQ1bX0-uEMv7LAQSD7K7Qf8,665
@@ -901,7 +901,7 @@ langchain/retrievers/re_phraser.py,sha256=5H2CAhUNl95wLY2IZf155hlCGr_wgDq7Y1DcYq
 langchain/retrievers/remote_retriever.py,sha256=f1jPII31IkNrhkH1LvlUlNLRQNMKNvgE_7qHa3o3P04,659
 langchain/retrievers/self_query/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/retrievers/self_query/astradb.py,sha256=lxlkYOr8xicH7MNyQKIg3Wc-XwhVpKGBn7maqYyR3Hk,670
-langchain/retrievers/self_query/base.py,sha256=obiSvc-wbF3FTI1cwHZSl6CKS4m-RDcSDrgacsbKVLQ,13751
+langchain/retrievers/self_query/base.py,sha256=6HLzqmChwlM1jiXRWUPkDJMPi8-dvNJPUfsE-Aqputg,13970
 langchain/retrievers/self_query/chroma.py,sha256=F0u_3Id1J1hIYM2D8_oNL2JJVetTFDyqW6fuGhjZ0ew,665
 langchain/retrievers/self_query/dashvector.py,sha256=CJAJQuJYNmw_GUIwwlPx3Scu1uDESTnFF-CzZEwFRRg,685
 langchain/retrievers/self_query/databricks_vector_search.py,sha256=S9V-XRfG6taeW3yRx_NZs4h-R4TiyHLnuJTIZa5rsqM,782
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.3.7.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
-langchain-0.3.7.dist-info/METADATA,sha256=dnEGxVj9Mtqfl-VUt9arKGHht18C67QuVMiQgYfc7hU,7076
-langchain-0.3.7.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langchain-0.3.7.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
-langchain-0.3.7.dist-info/RECORD,,
+langchain-0.3.11.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.3.11.dist-info/METADATA,sha256=O0XGCzX_AZu0BV-qcpu6jKUMJjiqx6JoRZstPppOIWE,7127
+langchain-0.3.11.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langchain-0.3.11.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.3.11.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.8.1
+Generator: poetry-core 1.9.1
 Root-Is-Purelib: true
 Tag: py3-none-any