langchain 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/_api/deprecation.py +13 -0
- langchain/agents/agent.py +3 -8
- langchain/agents/agent_toolkits/vectorstore/base.py +8 -0
- langchain/agents/agent_types.py +3 -4
- langchain/agents/chat/base.py +6 -1
- langchain/agents/conversational/base.py +6 -1
- langchain/agents/initialize.py +2 -4
- langchain/agents/mrkl/base.py +11 -2
- langchain/agents/react/base.py +21 -4
- langchain/chains/router/multi_prompt.py +70 -22
- langchain/chat_models/base.py +19 -7
- langchain/embeddings/__init__.py +2 -0
- langchain/embeddings/base.py +222 -2
- langchain/retrievers/self_query/base.py +8 -0
- {langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/METADATA +4 -3
- {langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/RECORD +19 -19
- {langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/WHEEL +1 -1
- {langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/LICENSE +0 -0
- {langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/entry_points.txt +0 -0
langchain/_api/deprecation.py
CHANGED
@@ -7,7 +7,20 @@ from langchain_core._api.deprecation import (
     warn_deprecated,
 )
 
+AGENT_DEPRECATION_WARNING = (
+    "LangChain agents will continue to be supported, but it is recommended for new "
+    "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+    "full-featured framework for building agents, including support for "
+    "tool-calling, persistence of state, and human-in-the-loop workflows. See "
+    "LangGraph documentation for more details: "
+    "https://langchain-ai.github.io/langgraph/. Refer here for its pre-built "
+    "ReAct agent: "
+    "https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/"
+)
+
+
 __all__ = [
+    "AGENT_DEPRECATION_WARNING",
     "LangChainDeprecationWarning",
     "LangChainPendingDeprecationWarning",
     "deprecated",
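The constant above centralizes the LangGraph migration notice that the agent modules below now pass to the deprecated decorator. As a rough illustration only (not part of the release), the sketch below decorates a hypothetical legacy_agent_factory helper with the same constant and captures the deprecation warning that langchain_core emits when the decorated function is first called.

import warnings

from langchain_core._api import deprecated

from langchain._api.deprecation import AGENT_DEPRECATION_WARNING


@deprecated("0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0")
def legacy_agent_factory() -> str:
    # Hypothetical stand-in for a deprecated agent entry point.
    return "legacy agent"


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_agent_factory()

# The captured message is the shared LangGraph migration text.
print(caught[0].message)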
langchain/agents/agent.py
CHANGED
@@ -47,6 +47,7 @@ from langchain_core.utils.input import get_color_mapping
 from pydantic import BaseModel, ConfigDict, model_validator
 from typing_extensions import Self
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent_iterator import AgentExecutorIterator
 from langchain.agents.agent_types import AgentType
 from langchain.agents.tools import InvalidTool
@@ -633,10 +634,7 @@ class RunnableMultiActionAgent(BaseMultiActionAgent):
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class LLMSingleActionAgent(BaseSingleActionAgent):
@@ -724,10 +722,7 @@ class LLMSingleActionAgent(BaseSingleActionAgent):
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class Agent(BaseSingleActionAgent):

langchain/agents/agent_toolkits/vectorstore/base.py
CHANGED
@@ -20,6 +20,10 @@ from langchain.chains.llm import LLMChain
     since="0.2.13",
     removal="1.0",
     message=(
+        "This function will continue to be supported, but it is recommended for new "
+        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+        "full-featured framework for building agents, including support for "
+        "tool-calling, persistence of state, and human-in-the-loop workflows. "
         "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html "  # noqa: E501
         "Read more here on how to create agents that query vector stores: "
@@ -109,6 +113,10 @@ def create_vectorstore_agent(
     since="0.2.13",
     removal="1.0",
     message=(
+        "This function will continue to be supported, but it is recommended for new "
+        "use cases to be built with LangGraph. LangGraph offers a more flexible and "
+        "full-featured framework for building agents, including support for "
+        "tool-calling, persistence of state, and human-in-the-loop workflows. "
         "See API reference for this function for a replacement implementation: "
        "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html "  # noqa: E501
         "Read more here on how to create agents that query vector stores: "
langchain/agents/agent_types.py
CHANGED
@@ -4,13 +4,12 @@ from enum import Enum
 
 from langchain_core._api import deprecated
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
+
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 class AgentType(str, Enum):
langchain/agents/chat/base.py
CHANGED
@@ -13,6 +13,7 @@ from langchain_core.prompts.chat import (
 from langchain_core.tools import BaseTool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.chat.output_parser import ChatOutputParser
 from langchain.agents.chat.prompt import (
@@ -25,7 +26,11 @@ from langchain.agents.utils import validate_tools_single_input
 from langchain.chains.llm import LLMChain
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ChatAgent(Agent):
     """Chat Agent."""
 

langchain/agents/conversational/base.py
CHANGED
@@ -11,6 +11,7 @@ from langchain_core.prompts import PromptTemplate
 from langchain_core.tools import BaseTool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.conversational.output_parser import ConvoOutputParser
@@ -19,7 +20,11 @@ from langchain.agents.utils import validate_tools_single_input
 from langchain.chains import LLMChain
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ConversationalAgent(Agent):
     """An agent that holds a conversation in addition to using tools."""
 
langchain/agents/initialize.py
CHANGED
@@ -7,6 +7,7 @@ from langchain_core.callbacks import BaseCallbackManager
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.tools import BaseTool
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import AgentExecutor
 from langchain.agents.agent_types import AgentType
 from langchain.agents.loading import AGENT_TO_CLASS, load_agent
@@ -14,10 +15,7 @@ from langchain.agents.loading import AGENT_TO_CLASS, load_agent
 
 @deprecated(
     "0.1.0",
-    message=(
-        "Use new agent constructor methods like create_react_agent, create_json_agent, "
-        "create_structured_chat_agent, etc."
-    ),
+    message=AGENT_DEPRECATION_WARNING,
     removal="1.0",
 )
 def initialize_agent(
langchain/agents/mrkl/base.py
CHANGED
@@ -12,6 +12,7 @@ from langchain_core.tools import BaseTool, Tool
 from langchain_core.tools.render import render_text_description
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.mrkl.output_parser import MRKLOutputParser
@@ -34,7 +35,11 @@ class ChainConfig(NamedTuple):
     action_description: str
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ZeroShotAgent(Agent):
     """Agent for the MRKL chain.
 
@@ -168,7 +173,11 @@ class ZeroShotAgent(Agent):
         super()._validate_tools(tools)
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class MRKLChain(AgentExecutor):
     """Chain that implements the MRKL system."""
 
langchain/agents/react/base.py
CHANGED
@@ -11,6 +11,7 @@ from langchain_core.prompts import BasePromptTemplate
 from langchain_core.tools import BaseTool, Tool
 from pydantic import Field
 
+from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
 from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
 from langchain.agents.agent_types import AgentType
 from langchain.agents.react.output_parser import ReActOutputParser
@@ -22,7 +23,11 @@ if TYPE_CHECKING:
     from langchain_community.docstore.base import Docstore
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActDocstoreAgent(Agent):
     """Agent for the ReAct chain."""
 
@@ -69,7 +74,11 @@ class ReActDocstoreAgent(Agent):
         return "Thought:"
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class DocstoreExplorer:
     """Class to assist with exploration of a document store."""
 
@@ -119,7 +128,11 @@ class DocstoreExplorer:
         return self.document.page_content.split("\n\n")
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActTextWorldAgent(ReActDocstoreAgent):
     """Agent for the ReAct TextWorld chain."""
 
@@ -139,7 +152,11 @@ class ReActTextWorldAgent(ReActDocstoreAgent):
             raise ValueError(f"Tool name should be Play, got {tool_names}")
 
 
-@deprecated(
+@deprecated(
+    "0.1.0",
+    message=AGENT_DEPRECATION_WARNING,
+    removal="1.0",
+)
 class ReActChain(AgentExecutor):
     """[Deprecated] Chain that implements the ReAct paper."""
 
langchain/chains/router/multi_prompt.py
CHANGED
@@ -20,9 +20,8 @@ from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMP
     since="0.2.12",
     removal="1.0",
     message=(
-        "
-        "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html"  # noqa: E501
+        "Please see migration guide here for recommended implementation: "
+        "https://python.langchain.com/docs/versions/migrating_chains/multi_prompt_chain/"  # noqa: E501
     ),
 )
 class MultiPromptChain(MultiRouteChain):
@@ -37,60 +36,109 @@ class MultiPromptChain(MultiRouteChain):
 
         from operator import itemgetter
         from typing import Literal
-        from typing_extensions import TypedDict
 
         from langchain_core.output_parsers import StrOutputParser
         from langchain_core.prompts import ChatPromptTemplate
-        from langchain_core.runnables import
+        from langchain_core.runnables import RunnableConfig
         from langchain_openai import ChatOpenAI
+        from langgraph.graph import END, START, StateGraph
+        from typing_extensions import TypedDict
 
         llm = ChatOpenAI(model="gpt-4o-mini")
 
+        # Define the prompts we will route to
         prompt_1 = ChatPromptTemplate.from_messages(
             [
                 ("system", "You are an expert on animals."),
-                ("human", "{
+                ("human", "{input}"),
             ]
         )
         prompt_2 = ChatPromptTemplate.from_messages(
             [
                 ("system", "You are an expert on vegetables."),
-                ("human", "{
+                ("human", "{input}"),
             ]
         )
 
+        # Construct the chains we will route to. These format the input query
+        # into the respective prompt, run it through a chat model, and cast
+        # the result to a string.
         chain_1 = prompt_1 | llm | StrOutputParser()
         chain_2 = prompt_2 | llm | StrOutputParser()
 
+
+        # Next: define the chain that selects which branch to route to.
+        # Here we will take advantage of tool-calling features to force
+        # the output to select one of two desired branches.
         route_system = "Route the user's query to either the animal or vegetable expert."
         route_prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", route_system),
-                ("human", "{
+                ("human", "{input}"),
             ]
         )
 
 
+        # Define schema for output:
         class RouteQuery(TypedDict):
-            \"\"\"Route query to destination.\"\"\"
+            \"\"\"Route query to destination expert.\"\"\"
+
             destination: Literal["animal", "vegetable"]
 
 
-        route_chain = (
-            route_prompt
-            | llm.with_structured_output(RouteQuery)
-            | itemgetter("destination")
-        )
+        route_chain = route_prompt | llm.with_structured_output(RouteQuery)
 
-        chain = {
-            "destination": route_chain,  # "animal" or "vegetable"
-            "query": lambda x: x["query"],  # pass through input query
-        } | RunnableLambda(
-            # if animal, chain_1. otherwise, chain_2.
-            lambda x: chain_1 if x["destination"] == "animal" else chain_2,
-        )
 
-
+        # For LangGraph, we will define the state of the graph to hold the query,
+        # destination, and final answer.
+        class State(TypedDict):
+            query: str
+            destination: RouteQuery
+            answer: str
+
+
+        # We define functions for each node, including routing the query:
+        async def route_query(state: State, config: RunnableConfig):
+            destination = await route_chain.ainvoke(state["query"], config)
+            return {"destination": destination}
+
+
+        # And one node for each prompt
+        async def prompt_1(state: State, config: RunnableConfig):
+            return {"answer": await chain_1.ainvoke(state["query"], config)}
+
+
+        async def prompt_2(state: State, config: RunnableConfig):
+            return {"answer": await chain_2.ainvoke(state["query"], config)}
+
+
+        # We then define logic that selects the prompt based on the classification
+        def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
+            if state["destination"] == "animal":
+                return "prompt_1"
+            else:
+                return "prompt_2"
+
+
+        # Finally, assemble the multi-prompt chain. This is a sequence of two steps:
+        # 1) Select "animal" or "vegetable" via the route_chain, and collect the answer
+        # alongside the input query.
+        # 2) Route the input query to chain_1 or chain_2, based on the
+        # selection.
+        graph = StateGraph(State)
+        graph.add_node("route_query", route_query)
+        graph.add_node("prompt_1", prompt_1)
+        graph.add_node("prompt_2", prompt_2)
+
+        graph.add_edge(START, "route_query")
+        graph.add_conditional_edges("route_query", select_node)
+        graph.add_edge("prompt_1", END)
+        graph.add_edge("prompt_2", END)
+        app = graph.compile()
+
+        result = await app.ainvoke({"query": "what color are carrots"})
+        print(result["destination"])
+        print(result["answer"])
     """  # noqa: E501
 
     @property
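The rewritten docstring above routes between two LLM chains with a LangGraph StateGraph instead of a RunnableLambda. The stripped-down sketch below (assuming the langgraph package is installed) isolates just that routing mechanics with plain Python nodes and no model calls, so it can be run as-is; the node names and the keyword-based router are invented for the demonstration.

from typing import Literal

from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict


class State(TypedDict):
    query: str
    destination: str
    answer: str


def route_query(state: State) -> dict:
    # Stand-in for the structured-output routing chain: a crude keyword check.
    dest = "animal" if "cat" in state["query"].lower() else "vegetable"
    return {"destination": dest}


def animal_node(state: State) -> dict:
    return {"answer": f"Animal expert answering: {state['query']}"}


def vegetable_node(state: State) -> dict:
    return {"answer": f"Vegetable expert answering: {state['query']}"}


def select_node(state: State) -> Literal["animal_node", "vegetable_node"]:
    # The returned node name is what add_conditional_edges dispatches to.
    return "animal_node" if state["destination"] == "animal" else "vegetable_node"


graph = StateGraph(State)
graph.add_node("route_query", route_query)
graph.add_node("animal_node", animal_node)
graph.add_node("vegetable_node", vegetable_node)
graph.add_edge(START, "route_query")
graph.add_conditional_edges("route_query", select_node)
graph.add_edge("animal_node", END)
graph.add_edge("vegetable_node", END)
app = graph.compile()

print(app.invoke({"query": "do cats purr?"})["answer"])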
langchain/chat_models/base.py
CHANGED
@@ -328,13 +328,7 @@ def init_chat_model(
 def _init_chat_model_helper(
     model: str, *, model_provider: Optional[str] = None, **kwargs: Any
 ) -> BaseChatModel:
-    model_provider =
-    if not model_provider:
-        raise ValueError(
-            f"Unable to infer model provider for {model=}, please specify "
-            f"model_provider directly."
-        )
-    model_provider = model_provider.replace("-", "_").lower()
+    model, model_provider = _parse_model(model, model_provider)
     if model_provider == "openai":
         _check_pkg("langchain_openai")
         from langchain_openai import ChatOpenAI
@@ -461,6 +455,24 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
         return None
 
 
+def _parse_model(model: str, model_provider: Optional[str]) -> Tuple[str, str]:
+    if (
+        not model_provider
+        and ":" in model
+        and model.split(":")[0] in _SUPPORTED_PROVIDERS
+    ):
+        model_provider = model.split(":")[0]
+        model = ":".join(model.split(":")[1:])
+    model_provider = model_provider or _attempt_infer_model_provider(model)
+    if not model_provider:
+        raise ValueError(
+            f"Unable to infer model provider for {model=}, please specify "
+            f"model_provider directly."
+        )
+    model_provider = model_provider.replace("-", "_").lower()
+    return model, model_provider
+
+
 def _check_pkg(pkg: str) -> None:
     if not util.find_spec(pkg):
         pkg_kebab = pkg.replace("_", "-")
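With _parse_model factored out, init_chat_model resolves a "provider:model" prefix the same way it resolves the explicit model_provider keyword. A small usage sketch (assuming langchain-openai is installed and OPENAI_API_KEY is set; constructing the model does not send a request):

from langchain.chat_models import init_chat_model

# Provider taken from the "openai:" prefix.
llm_a = init_chat_model("openai:gpt-4o-mini", temperature=0)

# Equivalent explicit form.
llm_b = init_chat_model("gpt-4o-mini", model_provider="openai", temperature=0)

print(type(llm_a).__name__, type(llm_b).__name__)  # ChatOpenAI ChatOpenAI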
langchain/embeddings/__init__.py
CHANGED
@@ -14,6 +14,7 @@ import logging
 from typing import TYPE_CHECKING, Any
 
 from langchain._api import create_importer
+from langchain.embeddings.base import init_embeddings
 from langchain.embeddings.cache import CacheBackedEmbeddings
 
 if TYPE_CHECKING:
@@ -221,4 +222,5 @@ __all__ = [
     "VertexAIEmbeddings",
     "VoyageEmbeddings",
     "XinferenceEmbeddings",
+    "init_embeddings",
 ]
langchain/embeddings/base.py
CHANGED
@@ -1,4 +1,224 @@
+import functools
+from importlib import util
+from typing import Any, List, Optional, Tuple, Union
+
+from langchain_core._api import beta
 from langchain_core.embeddings import Embeddings
+from langchain_core.runnables import Runnable
+
+_SUPPORTED_PROVIDERS = {
+    "azure_openai": "langchain_openai",
+    "bedrock": "langchain_aws",
+    "cohere": "langchain_cohere",
+    "google_vertexai": "langchain_google_vertexai",
+    "huggingface": "langchain_huggingface",
+    "mistralai": "langchain_mistralai",
+    "openai": "langchain_openai",
+}
+
+
+def _get_provider_list() -> str:
+    """Get formatted list of providers and their packages."""
+    return "\n".join(
+        f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items()
+    )
+
+
+def _parse_model_string(model_name: str) -> Tuple[str, str]:
+    """Parse a model string into provider and model name components.
+
+    The model string should be in the format 'provider:model-name', where provider
+    is one of the supported providers.
+
+    Args:
+        model_name: A model string in the format 'provider:model-name'
+
+    Returns:
+        A tuple of (provider, model_name)
+
+    .. code-block:: python
+
+        _parse_model_string("openai:text-embedding-3-small")
+        # Returns: ("openai", "text-embedding-3-small")
+
+        _parse_model_string("bedrock:amazon.titan-embed-text-v1")
+        # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+
+    Raises:
+        ValueError: If the model string is not in the correct format or
+            the provider is unsupported
+    """
+    if ":" not in model_name:
+        providers = _SUPPORTED_PROVIDERS
+        raise ValueError(
+            f"Invalid model format '{model_name}'.\n"
+            f"Model name must be in format 'provider:model-name'\n"
+            f"Example valid model strings:\n"
+            f" - openai:text-embedding-3-small\n"
+            f" - bedrock:amazon.titan-embed-text-v1\n"
+            f" - cohere:embed-english-v3.0\n"
+            f"Supported providers: {providers}"
+        )
+
+    provider, model = model_name.split(":", 1)
+    provider = provider.lower().strip()
+    model = model.strip()
+
+    if provider not in _SUPPORTED_PROVIDERS:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+    if not model:
+        raise ValueError("Model name cannot be empty")
+    return provider, model
+
+
+def _infer_model_and_provider(
+    model: str, *, provider: Optional[str] = None
+) -> Tuple[str, str]:
+    if not model.strip():
+        raise ValueError("Model name cannot be empty")
+    if provider is None and ":" in model:
+        provider, model_name = _parse_model_string(model)
+    else:
+        provider = provider
+        model_name = model
+
+    if not provider:
+        providers = _SUPPORTED_PROVIDERS
+        raise ValueError(
+            "Must specify either:\n"
+            "1. A model string in format 'provider:model-name'\n"
+            " Example: 'openai:text-embedding-3-small'\n"
+            "2. Or explicitly set provider from: "
+            f"{providers}"
+        )
+
+    if provider not in _SUPPORTED_PROVIDERS:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+    return provider, model_name
+
+
+@functools.lru_cache(maxsize=len(_SUPPORTED_PROVIDERS))
+def _check_pkg(pkg: str) -> None:
+    """Check if a package is installed."""
+    if not util.find_spec(pkg):
+        raise ImportError(
+            f"Could not import {pkg} python package. "
+            f"Please install it with `pip install {pkg}`"
+        )
+
+
+@beta()
+def init_embeddings(
+    model: str,
+    *,
+    provider: Optional[str] = None,
+    **kwargs: Any,
+) -> Union[Embeddings, Runnable[Any, List[float]]]:
+    """Initialize an embeddings model from a model name and optional provider.
+
+    **Note:** Must have the integration package corresponding to the model provider
+    installed.
+
+    Args:
+        model: Name of the model to use. Can be either:
+            - A model string like "openai:text-embedding-3-small"
+            - Just the model name if provider is specified
+        provider: Optional explicit provider name. If not specified,
+            will attempt to parse from the model string. Supported providers
+            and their required packages:
+
+            {_get_provider_list()}
+
+        **kwargs: Additional model-specific parameters passed to the embedding model.
+            These vary by provider, see the provider-specific documentation for details.
+
+    Returns:
+        An Embeddings instance that can generate embeddings for text.
+
+    Raises:
+        ValueError: If the model provider is not supported or cannot be determined
+        ImportError: If the required provider package is not installed
+
+    .. dropdown:: Example Usage
+        :open:
+
+        .. code-block:: python
+
+            # Using a model string
+            model = init_embeddings("openai:text-embedding-3-small")
+            model.embed_query("Hello, world!")
+
+            # Using explicit provider
+            model = init_embeddings(
+                model="text-embedding-3-small",
+                provider="openai"
+            )
+            model.embed_documents(["Hello, world!", "Goodbye, world!"])
+
+            # With additional parameters
+            model = init_embeddings(
+                "openai:text-embedding-3-small",
+                api_key="sk-..."
+            )
+
+    .. versionadded:: 0.3.9
+    """
+    if not model:
+        providers = _SUPPORTED_PROVIDERS.keys()
+        raise ValueError(
+            "Must specify model name. "
+            f"Supported providers are: {', '.join(providers)}"
+        )
+
+    provider, model_name = _infer_model_and_provider(model, provider=provider)
+    pkg = _SUPPORTED_PROVIDERS[provider]
+    _check_pkg(pkg)
+
+    if provider == "openai":
+        from langchain_openai import OpenAIEmbeddings
+
+        return OpenAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "azure_openai":
+        from langchain_openai import AzureOpenAIEmbeddings
+
+        return AzureOpenAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "google_vertexai":
+        from langchain_google_vertexai import VertexAIEmbeddings
+
+        return VertexAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "bedrock":
+        from langchain_aws import BedrockEmbeddings
+
+        return BedrockEmbeddings(model_id=model_name, **kwargs)
+    elif provider == "cohere":
+        from langchain_cohere import CohereEmbeddings
+
+        return CohereEmbeddings(model=model_name, **kwargs)
+    elif provider == "mistralai":
+        from langchain_mistralai import MistralAIEmbeddings
+
+        return MistralAIEmbeddings(model=model_name, **kwargs)
+    elif provider == "huggingface":
+        from langchain_huggingface import HuggingFaceEmbeddings
+
+        return HuggingFaceEmbeddings(model_name=model_name, **kwargs)
+    else:
+        raise ValueError(
+            f"Provider '{provider}' is not supported.\n"
+            f"Supported providers and their required packages:\n"
+            f"{_get_provider_list()}"
+        )
+
 
-
-
+__all__ = [
+    "init_embeddings",
+    "Embeddings",  # This one is for backwards compatibility
+]
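A short usage sketch of the new helper, which is marked beta in this release (assuming langchain-openai is installed and OPENAI_API_KEY is set for the first two calls): the "provider:model" string and the explicit provider keyword resolve to the same class, and unknown providers fail fast with the formatted provider list.

from langchain.embeddings import init_embeddings

# Equivalent ways to select the OpenAI embedding model.
emb_a = init_embeddings("openai:text-embedding-3-small")
emb_b = init_embeddings("text-embedding-3-small", provider="openai")
print(type(emb_a).__name__, type(emb_b).__name__)  # OpenAIEmbeddings OpenAIEmbeddings

# An unsupported provider raises ValueError before any integration package is imported.
try:
    init_embeddings("not-a-provider:some-model")
except ValueError as err:
    print(err)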
langchain/retrievers/self_query/base.py
CHANGED
@@ -161,6 +161,14 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
     if isinstance(vectorstore, MongoDBAtlasVectorSearch):
         return MongoDBAtlasTranslator()
 
+    try:
+        from langchain_neo4j import Neo4jVector
+    except ImportError:
+        pass
+    else:
+        if isinstance(vectorstore, Neo4jVector):
+            return Neo4jTranslator()
+
     try:
         from langchain_chroma import Chroma
     except ImportError:
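The new Neo4j branch follows the same optional-import pattern the function already uses for Chroma: the integration package is imported inside try/except so that environments without langchain-neo4j simply skip the isinstance check instead of failing at import time. A generic, self-contained sketch of that pattern follows; the pick_translator helper and its return values are invented for illustration only.

def pick_translator(vectorstore: object) -> str:
    """Illustrative stand-in for _get_builtin_translator's dispatch logic."""
    try:
        from langchain_neo4j import Neo4jVector  # optional integration package
    except ImportError:
        pass  # package absent: silently fall through to the next candidate
    else:
        if isinstance(vectorstore, Neo4jVector):
            return "Neo4jTranslator"
    return "no matching translator"


# With langchain-neo4j missing (or a non-Neo4j store), the fallback is used.
print(pick_translator(object()))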
{langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.3.8
+Version: 0.3.10
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,15 +11,16 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: langchain-core (>=0.3.22,<0.4.0)
 Requires-Dist: langchain-text-splitters (>=0.3.0,<0.4.0)
 Requires-Dist: langsmith (>=0.1.17,<0.2.0)
 Requires-Dist: numpy (>=1.22.4,<2) ; python_version < "3.12"
-Requires-Dist: numpy (>=1.26.2,<
+Requires-Dist: numpy (>=1.26.2,<3) ; python_version >= "3.12"
 Requires-Dist: pydantic (>=2.7.4,<3.0.0)
 Requires-Dist: requests (>=2,<3)
 Requires-Dist: tenacity (>=8.1.0,!=8.4.0,<10)
{langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
 langchain/__init__.py,sha256=4cqV-N_QJnfjk52DqtR2e72vsmJC1R6PkflvRdLjZQI,13709
 langchain/_api/__init__.py,sha256=0FuHuMNUBMrst1Y1nm5yZzQr2xbLmb7rxMsimqKBXhs,733
-langchain/_api/deprecation.py,sha256=
+langchain/_api/deprecation.py,sha256=GuafNmTTzxJ8eveGcVrPx9pmeu1Oe_d2cTrvOIZYuvo,1082
 langchain/_api/interactive_env.py,sha256=NlnXizhm1TG3l_qKNI0qHJiHkh9q2jRjt5zGJsg_BCA,139
 langchain/_api/module_import.py,sha256=q6UZ1WadWx7curQq8HV8nGwt9WmKd7tJ0mipUyG7ll0,6347
 langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
 langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
 langchain/agents/__init__.py,sha256=JQJ3VlqRMRpHbjR-pkzy1yowJkdEmsQEPXTptkyHc-o,6282
-langchain/agents/agent.py,sha256
+langchain/agents/agent.py,sha256=-DP3U47kQGphkOYYcXDS8k90xOTkR3hCMLZ1mE_RAgE,62216
 langchain/agents/agent_iterator.py,sha256=Zr0aikktn-aotTvoaVGJxXIBIkHidIQGkfQIKvovkxg,16454
 langchain/agents/agent_toolkits/__init__.py,sha256=N0ylx2gzwaOqaoHRXQs9jvYNIzrnTM-2rgjNkCU5UII,7370
 langchain/agents/agent_toolkits/ainetwork/__init__.py,sha256=henfKntuAEjG1KoN-Hk1IHy3fFGCYPWLEuZtF2bIdZI,25
@@ -74,19 +74,19 @@ langchain/agents/agent_toolkits/sql/toolkit.py,sha256=CCVWRJKVuECq-eFRjatJjYsy81
 langchain/agents/agent_toolkits/steam/__init__.py,sha256=iOMgxWCt0FTNLMNq0wScgSN_YdBBq-56VM6j0Ud8GpI,21
 langchain/agents/agent_toolkits/steam/toolkit.py,sha256=V0_xpO4mC4rfWBaLyTPW-pKwd-EScTTUnvgtB1sW6Cw,659
 langchain/agents/agent_toolkits/vectorstore/__init__.py,sha256=uT5qVHjIcx3yFkWfxOzbRKL5xwWcMuFGQ-es9O7b2NQ,56
-langchain/agents/agent_toolkits/vectorstore/base.py,sha256=
+langchain/agents/agent_toolkits/vectorstore/base.py,sha256=nahqycVjELCoK8psm49wf3pXmxyNaJUM8FJ5dH8GmXw,9122
 langchain/agents/agent_toolkits/vectorstore/prompt.py,sha256=DndLnLxi9iKjuYKo5E1nscHCOPeCoNcpl8dFHcSltxU,834
 langchain/agents/agent_toolkits/vectorstore/toolkit.py,sha256=dJhQ-0bfPOSliIFickPnJ40iefUPUyL3uV2pVaYP4pA,3210
 langchain/agents/agent_toolkits/xorbits/__init__.py,sha256=LJ-yZ3UKg4vjibzbgMXocR03vcsU_7ZvU7TlScM9RlE,1095
 langchain/agents/agent_toolkits/zapier/__init__.py,sha256=19Hc7HG8DzQfg83qqEbYiXA5FklLoRAEOfIs9JqTjX8,22
 langchain/agents/agent_toolkits/zapier/toolkit.py,sha256=BcFOzvckA9ZBz8HTeWUPFc_eIeifE3fIGE5RBSb7Yls,670
-langchain/agents/agent_types.py,sha256=
+langchain/agents/agent_types.py,sha256=b6WCaXUAXi6CK9vDaKRgGaOM5VIiUI7I5pHEdO2lRCM,1893
 langchain/agents/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain/agents/chat/base.py,sha256=
+langchain/agents/chat/base.py,sha256=AfUJZB-zsj9sSQ0HK1CiHYODPcZJlEygw-KaVAZCK3s,6590
 langchain/agents/chat/output_parser.py,sha256=0GRXvbNl18xqfSVHzA614pxVBuentIn--vC_QjFctoA,2367
 langchain/agents/chat/prompt.py,sha256=4Ub4oZyIKmJRpWwxOyGcYwlyoK8jJ0kR60jW0lPspC8,1158
 langchain/agents/conversational/__init__.py,sha256=TnMfDzoRzR-xCiR6ph3tn3H7OPbBPpuTsFuqkLMzjiA,75
-langchain/agents/conversational/base.py,sha256=
+langchain/agents/conversational/base.py,sha256=2raEsfYQ5kw0voUSUF09UjJuSWRgkCYkXvGxznxS7hw,6318
 langchain/agents/conversational/output_parser.py,sha256=OXFq_96ASiAVgz-Ra0UYO_ZxAIDSWaAWEKrXQlHIgVc,1610
 langchain/agents/conversational/prompt.py,sha256=6eiZYQT9liZQr30wAhoqP_2Unph7i-qSqTWqfqdMijI,1859
 langchain/agents/conversational_chat/__init__.py,sha256=TnMfDzoRzR-xCiR6ph3tn3H7OPbBPpuTsFuqkLMzjiA,75
@@ -100,14 +100,14 @@ langchain/agents/format_scratchpad/openai_functions.py,sha256=LtIroeeK_SQaxx3yAt
 langchain/agents/format_scratchpad/openai_tools.py,sha256=vyBEqvIZ5HCradWWg0weg4bj9R3nr-CpGZqvSua9HnE,166
 langchain/agents/format_scratchpad/tools.py,sha256=nyp_Z9sTnS6FLXSUfAEeZUxhpXcBLck52kdSz0Kas7I,1932
 langchain/agents/format_scratchpad/xml.py,sha256=DtMBd2-Rgi2LdfxXNImYYNcCEy5lxk8ix7-SSCOpWQY,578
-langchain/agents/initialize.py,sha256=
+langchain/agents/initialize.py,sha256=n2mWPXV_ZyTbmUVq4yLSP4prOvXQbFBCau-7doGprGQ,3574
 langchain/agents/json_chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/json_chat/base.py,sha256=V4jl4Vt_WxTotmSw8Py4rQU9GMnhHpXYC5x-wtRinxg,7994
 langchain/agents/json_chat/prompt.py,sha256=gZukOH50C1llQ-AB2QvtL-PSrczv-a-gJLIPYP8z6vA,551
 langchain/agents/load_tools.py,sha256=uMi1EZtkv2sgyUw6iXMNlCSZlIaju0Rw2svwMtkeW3E,286
 langchain/agents/loading.py,sha256=WRE-hsYnjnv1QPW91Sh9GNIJmVzcMOB-8b6YgmSwqmA,4814
 langchain/agents/mrkl/__init__.py,sha256=Gpz8w88wAF4GSXoGnuYOwZY5rhjFL5WGZvTVQa-YJas,86
-langchain/agents/mrkl/base.py,sha256=
+langchain/agents/mrkl/base.py,sha256=yonYGfgMkTixmrknWROMjwjddiUCgmWEkfIaWVlJdAU,7177
 langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
 langchain/agents/mrkl/prompt.py,sha256=2dTMP2lAWiLvCtuEijgQRjbKDlbPEnmx77duMwdJ7e4,641
 langchain/agents/openai_assistant/__init__.py,sha256=Xssaqoxrix3hn1gKSOLmDRQzTxAoJk0ProGXmXQe8Mw,114
@@ -130,7 +130,7 @@ langchain/agents/output_parsers/tools.py,sha256=go3kYKW406Wi8tR8Oqy6YGD26Ab-9PHQ
 langchain/agents/output_parsers/xml.py,sha256=2MjxW4nAM4sZN-in3K40_K5DBx6cI2Erb0TZbpSoZIY,1658
 langchain/agents/react/__init__.py,sha256=9RIjjaUDfWnoMEMpV57JQ0CwZZC5Soh357QdKpVIM-4,76
 langchain/agents/react/agent.py,sha256=TWjUeto0zrhf3YtKZ9NoM-kfOu4VR0ircrST_8HLAaM,5568
-langchain/agents/react/base.py,sha256=
+langchain/agents/react/base.py,sha256=26burAhSYnELPwiOCcz24VMSslJm0TlZDiiUbbR6Nv4,6038
 langchain/agents/react/output_parser.py,sha256=bEL3U3mxYGK7_7Lm4GlOq8JKQTgyHFQQIEVUUZjV1qs,1231
 langchain/agents/react/textworld_prompt.py,sha256=b9WDM8pFmqrfAWJ8n6zkxlPlxQI5oHljZ1R9g5y6cRE,1906
 langchain/agents/react/wiki_prompt.py,sha256=iQxqKo5IjsP9manfQwf5sz038Hv_hZH_CMWHtAZYKNM,6127
@@ -327,7 +327,7 @@ langchain/chains/router/__init__.py,sha256=r66J28FWIORVB5QIZ1d8R_HsiBaV1eQMZDZvM
 langchain/chains/router/base.py,sha256=ws6i8C4nk7YWmBqkXBcJ-FybNx4OeDJE-L1IELLK3M4,4517
 langchain/chains/router/embedding_router.py,sha256=hR5hOuwBdMBo_U3lo9SSwBfnVACR0ZpNc-nmDpei5hw,3069
 langchain/chains/router/llm_router.py,sha256=6FQUTXvZ9pekVkPeTNvQsj1jD9JdmfpMkxIPMe4oTMU,6994
-langchain/chains/router/multi_prompt.py,sha256=
+langchain/chains/router/multi_prompt.py,sha256=lLpJsYShzRBnvwtV3AaBbUcB8x6sK1PSxqDveCSC65A,6994
 langchain/chains/router/multi_prompt_prompt.py,sha256=T8UbIuxblnI6Byhw-BMAzwQcbB5ww3N6BiMqMJxS6Jc,1156
 langchain/chains/router/multi_retrieval_prompt.py,sha256=VUYGLWbwGiv03aSMW5sjdGNwsEa9FKgq0RcK5o3lkH4,1079
 langchain/chains/router/multi_retrieval_qa.py,sha256=tjIhHEbOwtF3CLq0qQ8Kd78ao5BXRKZLsm9UlmHrdtQ,4254
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
 langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
 langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
 langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
-langchain/chat_models/base.py,sha256=
+langchain/chat_models/base.py,sha256=DL0OTo4ro35oyrVLGzRcce5bF34k32C788mUZ4lFEcE,32616
 langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
 langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
 langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
@@ -572,12 +572,12 @@ langchain/document_transformers/long_context_reorder.py,sha256=uRPWoCzkRvS-rp6L3
 langchain/document_transformers/nuclia_text_transform.py,sha256=IIg8LuX116M_PrnqQE7RwN_gy45BBs1UTONtujFEMeU,678
 langchain/document_transformers/openai_functions.py,sha256=UfhBLrya4MRRNWofT87qRKcZI27J8UjZTX9gn005jEA,929
 langchain/document_transformers/xsl/html_chunks_with_headers.xslt,sha256=ti9sT_zWqZQf0aaeX5zT6tfHT1CuUpAVCvzoZWutE0o,6033
-langchain/embeddings/__init__.py,sha256
+langchain/embeddings/__init__.py,sha256=9f7XVRZhOkU1ME7ZV-ODihjMYrEN32EP2kGdXoCRDHI,8390
 langchain/embeddings/aleph_alpha.py,sha256=_yTqGDHsHbh83Zp0MjJ497ilIxkEJm5ccmxOWbJJay4,890
 langchain/embeddings/awa.py,sha256=1cnMiwKKU3ml3Zz5s5WIpcZSlYNVFFGCaeJilrxN8HE,626
 langchain/embeddings/azure_openai.py,sha256=tmICp-NOrxoVFENBy4F_0-c0l3znf8bOtBBo-UZhajg,650
 langchain/embeddings/baidu_qianfan_endpoint.py,sha256=w7BeE53d7o9Y8Xf0cZntmmziih7oBJcmF-jBW70KJlc,662
-langchain/embeddings/base.py,sha256=
+langchain/embeddings/base.py,sha256=YOZ4S2qmbnJ889P3mzvSD_2uQdr3a-U5QMhHIXU8Fo0,7385
 langchain/embeddings/bedrock.py,sha256=tCBm3vcN0B21Ga6KvNwhgJpgjobC2VEcmPApUmwXO4E,638
 langchain/embeddings/bookend.py,sha256=qWaQXZw9Gq11kEdfIO71h1H0NaXqVKm45TiStxd2xaM,638
 langchain/embeddings/cache.py,sha256=69qxrvD4S5gtQvzv72a4sP9cES-KE3fH908C1XRDIDI,10187
@@ -901,7 +901,7 @@ langchain/retrievers/re_phraser.py,sha256=5H2CAhUNl95wLY2IZf155hlCGr_wgDq7Y1DcYq
 langchain/retrievers/remote_retriever.py,sha256=f1jPII31IkNrhkH1LvlUlNLRQNMKNvgE_7qHa3o3P04,659
 langchain/retrievers/self_query/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/retrievers/self_query/astradb.py,sha256=lxlkYOr8xicH7MNyQKIg3Wc-XwhVpKGBn7maqYyR3Hk,670
-langchain/retrievers/self_query/base.py,sha256=
+langchain/retrievers/self_query/base.py,sha256=6HLzqmChwlM1jiXRWUPkDJMPi8-dvNJPUfsE-Aqputg,13970
 langchain/retrievers/self_query/chroma.py,sha256=F0u_3Id1J1hIYM2D8_oNL2JJVetTFDyqW6fuGhjZ0ew,665
 langchain/retrievers/self_query/dashvector.py,sha256=CJAJQuJYNmw_GUIwwlPx3Scu1uDESTnFF-CzZEwFRRg,685
 langchain/retrievers/self_query/databricks_vector_search.py,sha256=S9V-XRfG6taeW3yRx_NZs4h-R4TiyHLnuJTIZa5rsqM,782
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.3.
-langchain-0.3.
-langchain-0.3.
-langchain-0.3.
-langchain-0.3.
+langchain-0.3.10.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.3.10.dist-info/METADATA,sha256=P5V_UT3Cp3vgRhV8lYufDJquT0X09f0J-UoJ_dFGxPA,7129
+langchain-0.3.10.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langchain-0.3.10.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.3.10.dist-info/RECORD,,
{langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/LICENSE
File without changes
{langchain-0.3.8.dist-info → langchain-0.3.10.dist-info}/entry_points.txt
File without changes