langchain 1.0.0a8__py3-none-any.whl → 1.0.0a10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of langchain has been flagged as potentially problematic.
langchain/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
 from typing import Any
 
-__version__ = "1.0.0a6"
+__version__ = "1.0.0a10"
 
 
 def __getattr__(name: str) -> Any:  # noqa: ANN401
@@ -1,7 +1,7 @@
 """langgraph.prebuilt exposes a higher-level API for creating and executing agents and tools."""
 
 from langchain.agents.react_agent import AgentState, create_agent
-from langchain.agents.tool_node import ToolNode
+from langchain.tools import ToolNode
 
 __all__ = [
     "AgentState",
@@ -169,7 +169,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 return None
 
 # Process all tool calls that require interrupts
-approved_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
+revised_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
 artificial_tool_messages: list[ToolMessage] = []
 
 # Create interrupt requests for all tools that need approval
@@ -210,10 +210,10 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 config = self.interrupt_on[tool_call["name"]]
 
 if response["type"] == "accept" and config.get("allow_accept"):
-    approved_tool_calls.append(tool_call)
+    revised_tool_calls.append(tool_call)
 elif response["type"] == "edit" and config.get("allow_edit"):
     edited_action = response["args"]
-    approved_tool_calls.append(
+    revised_tool_calls.append(
         ToolCall(
             type="tool_call",
             name=edited_action["action"],
@@ -233,6 +233,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         tool_call_id=tool_call["id"],
         status="error",
     )
+    revised_tool_calls.append(tool_call)
     artificial_tool_messages.append(tool_message)
 else:
     allowed_actions = [
@@ -249,9 +250,6 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
     raise ValueError(msg)
 
 # Update the AI message to only include approved tool calls
-last_ai_msg.tool_calls = approved_tool_calls
+last_ai_msg.tool_calls = revised_tool_calls
 
-if len(approved_tool_calls) > 0:
-    return {"messages": [last_ai_msg, *artificial_tool_messages]}
-
-return {"jump_to": "model", "messages": artificial_tool_messages}
+return {"messages": [last_ai_msg, *artificial_tool_messages]}
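The behavioral change in this hunk: a rejected tool call is no longer dropped from the `AIMessage`. It stays in `revised_tool_calls`, gets paired with an artificial error `ToolMessage`, and the middleware now always returns a plain message update instead of sometimes jumping back to the model. Below is a minimal sketch of the new return shape, using only `langchain_core` message types; the tool name, arguments, and rejection text are placeholders, and the variable names mirror the hunk above.

```python
from langchain_core.messages import AIMessage, ToolMessage

# An AI message whose single tool call was rejected by the human reviewer.
last_ai_msg = AIMessage(
    content="",
    tool_calls=[{"name": "delete_file", "args": {"path": "tmp.txt"}, "id": "call_1"}],
)

# The artificial error message generated for the rejected call.
artificial_tool_messages = [
    ToolMessage(
        content="Tool call rejected by the user.",
        tool_call_id="call_1",
        status="error",
    )
]

# New behavior: the rejected call is kept on the AI message and this update is
# always returned; routing back to the model is handled by the graph edge
# (see the model_to_tools changes later in this diff).
update = {"messages": [last_ai_msg, *artificial_tool_messages]}
```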
@@ -1,6 +1,6 @@
 """Anthropic prompt caching middleware."""
 
-from typing import Literal
+from typing import Any, Literal
 from warnings import warn
 
 from langchain.agents.middleware.types import AgentMiddleware, ModelRequest
@@ -42,6 +42,7 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
 def modify_model_request(  # type: ignore[override]
     self,
     request: ModelRequest,
+    state: dict[str, Any],  # noqa: ARG002
 ) -> ModelRequest:
     """Modify the model request to add cache control blocks."""
     try:
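For anyone maintaining custom middleware, the hook now receives the agent state as a second positional argument. A rough sketch of a no-op middleware matching the updated signature follows; it assumes custom subclasses override `modify_model_request` the same way the built-in class above does, which may not hold for every middleware in this alpha.

```python
from typing import Any

from langchain.agents.middleware.types import AgentMiddleware, ModelRequest


class PassthroughMiddleware(AgentMiddleware):
    """Hypothetical middleware that returns the request unchanged."""

    def modify_model_request(  # type: ignore[override]
        self,
        request: ModelRequest,
        state: dict[str, Any],  # new in 1.0.0a10; the built-ins ignore it for now
    ) -> ModelRequest:
        return request
```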
@@ -45,7 +45,7 @@ __all__ = [
     "PublicAgentState",
 ]
 
-JumpTo = Literal["tools", "model", "__end__"]
+JumpTo = Literal["tools", "model", "end"]
 """Destination to jump to when a middleware node returns."""
 
 ResponseT = TypeVar("ResponseT")
@@ -237,7 +237,7 @@ def before_model(
     AgentState schema.
 tools: Optional list of additional tools to register with this middleware.
 jump_to: Optional list of valid jump destinations for conditional edges.
-    Valid values are: "tools", "model", "__end__"
+    Valid values are: "tools", "model", "end"
 name: Optional name for the generated middleware class. If not provided,
     uses the decorated function's name.
 
@@ -260,10 +260,10 @@ def before_model(
 
 Advanced usage with runtime and conditional jumping:
 ```python
-@before_model(jump_to=["__end__"])
+@before_model(jump_to=["end"])
 def conditional_before_model(state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
     if some_condition(state):
-        return {"jump_to": "__end__"}
+        return {"jump_to": "end"}
     return None
 ```
 
@@ -474,7 +474,7 @@ def after_model(
     AgentState schema.
 tools: Optional list of additional tools to register with this middleware.
 jump_to: Optional list of valid jump destinations for conditional edges.
-    Valid values are: "tools", "model", "__end__"
+    Valid values are: "tools", "model", "end"
 name: Optional name for the generated middleware class. If not provided,
     uses the decorated function's name.
 
@@ -33,8 +33,8 @@ from langchain.agents.structured_output import (
     StructuredOutputValidationError,
     ToolStrategy,
 )
-from langchain.agents.tool_node import ToolNode
 from langchain.chat_models import init_chat_model
+from langchain.tools import ToolNode
 
 STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
 
@@ -505,8 +505,10 @@ def create_agent( # noqa: PLR0915
 def _resolve_jump(jump_to: JumpTo | None, first_node: str) -> str | None:
     if jump_to == "model":
         return first_node
-    if jump_to:
-        return jump_to
+    if jump_to == "end":
+        return "__end__"
+    if jump_to == "tools":
+        return "tools"
     return None
 
 
@@ -530,29 +532,34 @@ def _make_model_to_tools_edge(
     first_node: str, structured_output_tools: dict[str, OutputToolBinding], tool_node: ToolNode
 ) -> Callable[[dict[str, Any]], str | list[Send] | None]:
     def model_to_tools(state: dict[str, Any]) -> str | list[Send] | None:
+        # 1. if there's an explicit jump_to in the state, use it
         if jump_to := state.get("jump_to"):
            return _resolve_jump(jump_to, first_node)
 
         last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])
         tool_message_ids = [m.tool_call_id for m in tool_messages]
 
+        # 2. if the model hasn't called any tools, jump to END
+        # this is the classic exit condition for an agent loop
+        if len(last_ai_message.tool_calls) == 0:
+            return END
+
         pending_tool_calls = [
             c
             for c in last_ai_message.tool_calls
             if c["id"] not in tool_message_ids and c["name"] not in structured_output_tools
         ]
 
+        # 3. if there are pending tool calls, jump to the tool node
         if pending_tool_calls:
-            # imo we should not be injecting state, store here,
-            # this should be done by the tool node itself ideally but this is a consequence
-            # of using Send w/ tool calls directly which allows more intuitive interrupt behavior
-            # largely internal so can be fixed later
             pending_tool_calls = [
                 tool_node.inject_tool_args(call, state, None) for call in pending_tool_calls
             ]
             return [Send("tools", [tool_call]) for tool_call in pending_tool_calls]
 
-        return END
+        # 4. AIMessage has tool calls, but there are no pending tool calls
+        # which suggests the injection of artificial tool messages. jump to the first node
+        return first_node
 
     return model_to_tools
 
@@ -602,7 +609,7 @@ def _add_middleware_edge(
 
     destinations = [default_destination]
 
-    if "__end__" in jump_to:
+    if "end" in jump_to:
         destinations.append(END)
     if "tools" in jump_to:
         destinations.append("tools")
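Taken together with the `JumpTo` change above, the user-facing value is now `"end"`, and the framework translates it to langgraph's `"__end__"` sentinel internally. A standalone restatement of that mapping is sketched below; the node name `"model_request"` is purely an illustrative placeholder.

```python
def resolve_jump(jump_to: str | None, first_node: str) -> str | None:
    """Mirror of the _resolve_jump logic shown earlier in this diff."""
    if jump_to == "model":
        return first_node
    if jump_to == "end":
        return "__end__"  # langgraph's END sentinel
    if jump_to == "tools":
        return "tools"
    return None


assert resolve_jump("end", "model_request") == "__end__"
assert resolve_jump("model", "model_request") == "model_request"
assert resolve_jump(None, "model_request") is None
```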
@@ -54,8 +54,8 @@ from langchain.agents.structured_output import (
     StructuredOutputValidationError,
     ToolStrategy,
 )
-from langchain.agents.tool_node import ToolNode
 from langchain.chat_models import init_chat_model
+from langchain.tools import ToolNode
 
 if TYPE_CHECKING:
     from langchain_core.tools import BaseTool
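This is the same relocation applied throughout the diff: `ToolNode` now lives in `langchain.tools` instead of `langchain.agents.tool_node`. For downstream code that has to tolerate both prerelease layouts, a small compatibility shim along these lines should work; the fallback branch is inferred from the removed import above.

```python
try:
    # Import path introduced in 1.0.0a10.
    from langchain.tools import ToolNode
except ImportError:
    # Older 1.0.0 alphas exposed the class from the agents package.
    from langchain.agents.tool_node import ToolNode
```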
@@ -971,7 +971,7 @@ def create_agent( # noqa: D417
         return model.bind_tools(tools)
     ```
 
-    !!! note "Dynamic Model Requirements"
+    .. note::
         Ensure returned models have appropriate tools bound via
         `.bind_tools()` and support required functionality. Bound tools
         must be a subset of those specified in the `tools` parameter.
@@ -985,10 +985,10 @@ def create_agent( # noqa: D417
       of the list of messages in state["messages"].
     - SystemMessage: this is added to the beginning of the list of messages
       in state["messages"].
-    - Callable: This function should take in full graph state and the output is then passed
-      to the language model.
-    - Runnable: This runnable should take in full graph state and the output is then passed
-      to the language model.
+    - Callable: This function should take in full graph state and the output is
+      then passed to the language model.
+    - Runnable: This runnable should take in full graph state and the output is
+      then passed to the language model.
 
 response_format: An optional UsingToolStrategy configuration for structured responses.
 
@@ -1002,7 +1002,8 @@ def create_agent( # noqa: D417
 
     - schemas: A sequence of ResponseSchema objects that define
       the structured output format
-    - tool_choice: Either "required" or "auto" to control when structured output is used
+    - tool_choice: Either "required" or "auto" to control when structured
+      output is used
 
 Each ResponseSchema contains:
 
@@ -1011,12 +1012,12 @@ def create_agent( # noqa: D417
     - description: Optional custom description (defaults to model docstring)
     - strict: Whether to enforce strict validation
 
-    !!! Important
+    .. important::
         `response_format` requires the model to support tool calling
 
-    !!! Note
-        Structured responses are handled directly in the model call node via tool calls,
-        eliminating the need for separate structured response nodes.
+    .. note::
+        Structured responses are handled directly in the model call node via
+        tool calls, eliminating the need for separate structured response nodes.
 
 pre_model_hook: An optional node to add before the `agent` node
     (i.e., the node that calls the LLM).
@@ -1037,12 +1038,12 @@ def create_agent( # noqa: D417
     }
     ```
 
-    !!! Important
+    .. important::
         At least one of `messages` or `llm_input_messages` MUST be provided
         and will be used as an input to the `agent` node.
         The rest of the keys will be added to the graph state.
 
-    !!! Warning
+    .. warning::
         If you are returning `messages` in the pre-model hook,
         you should OVERWRITE the `messages` key by doing the following:
@@ -1059,7 +1060,7 @@ def create_agent( # noqa: D417
     Post-model hook must be a callable or a runnable that takes in
     current graph state and returns a state update.
 
-    !!! Note
+    .. note::
         Only available with `version="v2"`.
 state_schema: An optional state schema that defines graph state.
     Must have `messages` and `remaining_steps` keys.
@@ -1092,7 +1093,7 @@ def create_agent( # noqa: D417
     another graph as a subgraph node -
     particularly useful for building multi-agent systems.
 
-    !!! warning "`config_schema` Deprecated"
+    .. warning::
         The `config_schema` parameter is deprecated in v0.6.0 and support will be removed in v2.0.0.
         Please use `context_schema` instead to specify the schema for run-scoped context.
 
@@ -8,10 +8,19 @@ from langchain_core.tools import (
     tool,
 )
 
+from langchain.tools.tool_node import (
+    InjectedState,
+    InjectedStore,
+    ToolNode,
+)
+
 __all__ = [
     "BaseTool",
+    "InjectedState",
+    "InjectedStore",
     "InjectedToolArg",
     "InjectedToolCallId",
     "ToolException",
+    "ToolNode",
     "tool",
 ]
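With `ToolNode`, `InjectedState`, and `InjectedStore` re-exported here, the tool execution pieces can be imported from one place. A minimal sketch using only names present in the updated `__all__`; the assumption is that `ToolNode` keeps its existing constructor, which accepts a sequence of tools.

```python
from langchain.tools import ToolNode, tool


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


# Build a tool-executing node from the decorated tool.
node = ToolNode([add])
```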
@@ -21,7 +21,7 @@ Key Components:
 Typical Usage:
     ```python
     from langchain_core.tools import tool
-    from langchain.agents import ToolNode
+    from langchain.tools import ToolNode
 
 
     @tool
@@ -344,7 +344,7 @@ class ToolNode(RunnableCallable):
     Basic usage:
 
     ```python
-    from langchain.agents import ToolNode
+    from langchain.tools import ToolNode
     from langchain_core.tools import tool
 
     @tool
@@ -359,7 +359,7 @@ class ToolNode(RunnableCallable):
 
     ```python
     from typing_extensions import Annotated
-    from langgraph.agents.tool_node import InjectedState
+    from langchain.tools import InjectedState
 
     @tool
     def context_tool(query: str, state: Annotated[dict, InjectedState]) -> str:
@@ -885,7 +885,8 @@ def tools_condition(
 
     ```python
     from langgraph.graph import StateGraph
-    from langgraph.agents.tool_node import ToolNode, tools_condition
+    from langchain.tools import ToolNode
+    from langchain.tools.tool_node import tools_condition
     from typing_extensions import TypedDict
 
 
@@ -950,9 +951,7 @@ class InjectedState(InjectedToolArg):
     from typing_extensions import Annotated, TypedDict
 
     from langchain_core.messages import BaseMessage, AIMessage
-    from langchain_core.tools import tool
-
-    from langgraph.agents.tool_node import InjectedState, ToolNode
+    from langchain.tools import InjectedState, ToolNode, tool
 
 
     class AgentState(TypedDict):
@@ -1020,15 +1019,14 @@ class InjectedStore(InjectedToolArg):
     for maintaining context, user preferences, or any other data that needs to
     persist beyond individual workflow executions.
 
-    !!! Warning
+    .. warning::
         `InjectedStore` annotation requires `langchain-core >= 0.3.8`
 
     Example:
         ```python
         from typing_extensions import Annotated
-        from langchain_core.tools import tool
         from langgraph.store.memory import InMemoryStore
-        from langgraph.agents.tool_node import InjectedStore, ToolNode
+        from langchain.tools import InjectedStore, ToolNode, tool
 
         @tool
         def save_preference(
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 1.0.0a8
+Version: 1.0.0a10
 Summary: Building applications with LLMs through composability
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -9,7 +9,7 @@ Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: <4.0.0,>=3.10.0
 Requires-Dist: langchain-core<2.0.0,>=0.3.75
 Requires-Dist: langchain-text-splitters<2.0.0,>=0.3.11
-Requires-Dist: langgraph<2.0.0,>=0.6.7
+Requires-Dist: langgraph==1.0.0a4
 Requires-Dist: pydantic<3.0.0,>=2.7.4
 Provides-Extra: community
 Requires-Dist: langchain-community; extra == "community"
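Note the tightened dependency: `langgraph` is now pinned to the exact prerelease `1.0.0a4` rather than the earlier `<2.0.0,>=0.6.7` range. A quick way to confirm what resolved in an environment, with the expected values taken from this diff:

```python
from importlib import metadata

print(metadata.version("langchain"))  # expected: 1.0.0a10
print(metadata.version("langgraph"))  # expected: 1.0.0a4, per the pin above
```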
@@ -1,26 +1,25 @@
-langchain-1.0.0a8.dist-info/METADATA,sha256=slSRaiJXGZdoNrLCLCYpJqqaPdM0keHqtv2aCBHLC7w,6259
-langchain-1.0.0a8.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
-langchain-1.0.0a8.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-langchain-1.0.0a8.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
-langchain/__init__.py,sha256=Z6r4MjNaC6DSyiMgFSRmi8EhizelVIOb_CVJJRAVjDc,604
+langchain-1.0.0a10.dist-info/METADATA,sha256=1--JBKKepNpXt-uDW1W7YUlJXTIKzRgF1ixdCchWKFM,6255
+langchain-1.0.0a10.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+langchain-1.0.0a10.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain-1.0.0a10.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain/__init__.py,sha256=F9IyeZPtTvxD-i2stOzPvGbIUEkUwc-qEFBGM8jDI2U,605
 langchain/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/_internal/_documents.py,sha256=z9wAPukoASOMw4WTFFBKCCZYsvsKbo-Cq6CeHjdq9eE,1045
 langchain/_internal/_lazy_import.py,sha256=S_iSAxGvW7lVcUQYgi45KG8XhWZzkORsZ_olPMZPlqU,1258
 langchain/_internal/_prompts.py,sha256=MJ11D0MHbxb3guVGWKBcdjH3QdfBXWB2FO0g00wympc,5912
 langchain/_internal/_typing.py,sha256=lV5NUl0SggnjlJ1CrfK-hna2OMyH02vZwrSmub2FDc0,1657
 langchain/_internal/_utils.py,sha256=lG8X9muiRAWtQjRPudq-1x-wHbk0J3spu_rYZckVdYs,251
-langchain/agents/__init__.py,sha256=NG2S3dic9L3i4sAD9mpgaTv6Dl4L3u45xxK6jn-I4W8,281
+langchain/agents/__init__.py,sha256=a-h5B7Fx19y6CMSHRJh6t7c3Y0MLUKw11Jx5Hk10Als,270
 langchain/agents/_internal/__init__.py,sha256=5nNBeaeQIvv9IOQjY4_aNW8pffWzMXQgi0b6Nx-WghM,37
 langchain/agents/_internal/_typing.py,sha256=JoWa-KL5uLNeq6yrm56wnIvhDeFnCt2fTzgUcj5zWy4,270
 langchain/agents/middleware/__init__.py,sha256=-NzMTmD5ogpzlsqHGjv6SnTrfXqU3vTahGUoGDk299U,511
-langchain/agents/middleware/human_in_the_loop.py,sha256=_6THKNzp1dvYBwBLdnZ9PXsHJP3uedn4A60ZON4xlvI,10301
-langchain/agents/middleware/prompt_caching.py,sha256=QLoWdd9jUiXAytGqbXW0I_Mg8WgrgTBO8gOZ-s8Bx8g,3081
+langchain/agents/middleware/human_in_the_loop.py,sha256=RH5uuPLdJBdLivLmsx-KpgkzjAq8tIeANLJa74u4h_s,10230
+langchain/agents/middleware/prompt_caching.py,sha256=AwcsTUSIE0Ur2TeovfX-KTznv7GJdwbYFnIfSE60Wds,3133
 langchain/agents/middleware/summarization.py,sha256=qqEqAuJXQ5rfewhFHftHLnrX8jhdMu9dPfz0akhzfuc,10281
-langchain/agents/middleware/types.py,sha256=a9B6Ihx12mNTroopL1SqHxsO51ZfSkxdtkPeZXw8EJc,18606
-langchain/agents/middleware_agent.py,sha256=fncjAFNsqZqEkDYSsBfq-goxN4GqNlQixirWRBguXhs,23847
-langchain/agents/react_agent.py,sha256=6ZNI2dp0hTL7hTm7ao-HkQ3hmVvBQuFu9pJz0PSK_eg,49712
+langchain/agents/middleware/types.py,sha256=68VmyBQ5294Yi6SwOC9-LqdkdRLg8q3gP1FYs5XCpV0,18586
+langchain/agents/middleware_agent.py,sha256=BWuqQy4ACIZkQy89yRqnlg7E6vC2iqxbSQgw87NRRY4,24091
+langchain/agents/react_agent.py,sha256=SL9KxKEqLdRcyrt2kPHMbnYvqizENL1uLxNzjDPDjpk,49668
 langchain/agents/structured_output.py,sha256=QWNafJx7au_jJawJgIfovnDoP8Z9mLxDZNvDX_1RRJ0,13327
-langchain/agents/tool_node.py,sha256=QabTfIi8nGrwfzaSOeWfyHos6sgXjFTdRXexQG7u2HE,46596
 langchain/chat_models/__init__.py,sha256=PTq9qskQEbqXYAcUUxUXDsugOcwISgFhv4w40JgkbgU,181
 langchain/chat_models/base.py,sha256=kzEGebhYKoAcRIEeIPRmjwMx8F6y4Xw2TSC52W9BsWI,34754
 langchain/documents/__init__.py,sha256=DjuBCy1TQbem4Vz8SsCcGAbZeFwW5KgGPvDrA8e9oGA,94
@@ -34,5 +33,6 @@ langchain/storage/encoder_backed.py,sha256=4h_4ZgP_B9p1lwVMNdBgpEIC7UDAp--ncp9wm
 langchain/storage/exceptions.py,sha256=Fl_8tON3KmByBKwXtno5WSj0-c2RiZxnhw3gv5aS2T8,114
 langchain/storage/in_memory.py,sha256=ozrmu0EtaJJVSAzK_u7nzxWpr9OOscWkANHSg-qIVYQ,369
 langchain/text_splitter.py,sha256=yxWs4secpnkfK6VZDiNJNdlYOrRZ18RQZj1S3xNQ73A,1554
-langchain/tools/__init__.py,sha256=NYQzLxW2iI5Twu3voefVC-dJEI4Wgh7jC311CQEpvZs,252
-langchain-1.0.0a8.dist-info/RECORD,,
+langchain/tools/__init__.py,sha256=tWlUqT7jrnf1ouhMctuUkaYBWEuOPD3JQX4Y8uTHk5w,405
+langchain/tools/tool_node.py,sha256=_qwqqTbjNh3edcd3G50ZXHsqb0rg_LDlhGWYGiAUStw,46515
+langchain-1.0.0a10.dist-info/RECORD,,