langchain 1.0.0a11__py3-none-any.whl → 1.0.0a12__py3-none-any.whl
This diff compares the contents of two publicly released package versions from one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
- langchain/__init__.py +1 -1
- langchain/agents/factory.py +14 -14
- {langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/METADATA +1 -1
- {langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/RECORD +6 -6
- {langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/WHEEL +0 -0
- {langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/licenses/LICENSE +0 -0
langchain/__init__.py
CHANGED
langchain/agents/factory.py
CHANGED
@@ -219,7 +219,7 @@ def create_agent( # noqa: PLR0915
 model: The language model for the agent. Can be a string identifier
 (e.g., ``"openai:gpt-4"``), a chat model instance (e.g., ``ChatOpenAI()``).
 tools: A list of tools, dicts, or callables. If ``None`` or an empty list,
-the agent will consist of a
+the agent will consist of a model node without a tool calling loop.
 system_prompt: An optional system prompt for the LLM. If provided as a string,
 it will be converted to a SystemMessage and added to the beginning
 of the message list.
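The reworded docstring line spells out the no-tools behavior: without tools, create_agent builds a graph whose only step is the renamed "model" node. A minimal usage sketch under that assumption (the model string, prompt, and invoke payload are illustrative placeholders, not taken from this diff):

from langchain.agents import create_agent

# No tools: per the updated docstring, the compiled agent is a single
# "model" node with no tool-calling loop.
agent = create_agent(
    model="openai:gpt-4",                        # string identifier form
    tools=None,                                  # or [] - same effect
    system_prompt="You are a concise assistant.",
)

# Assumed messages-style input state, as used by LangGraph-based agents.
result = agent.invoke({"messages": [{"role": "user", "content": "Hello!"}]})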
@@ -608,7 +608,7 @@ def create_agent( # noqa: PLR0915
 )
 return request.model.bind(**request.model_settings), None

-def
+def model_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
 """Sync model request handler with sequential middleware processing."""
 request = ModelRequest(
 model=model,
@@ -674,7 +674,7 @@ def create_agent( # noqa: PLR0915
 msg = f"Maximum retry attempts ({max_attempts}) exceeded"
 raise RuntimeError(msg)

-async def
+async def amodel_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
 """Async model request handler with sequential middleware processing."""
 request = ModelRequest(
 model=model,
@@ -724,7 +724,7 @@ def create_agent( # noqa: PLR0915
 raise RuntimeError(msg)

 # Use sync or async based on model capabilities
-graph.add_node("
+graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))

 # Only add tools node if we have tools
 if tool_node is not None:
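The renamed node is registered with both handlers through RunnableCallable, which dispatches to the async callable under ainvoke/astream and to the sync one otherwise. A stripped-down sketch of that pattern (the import path and the MessagesState schema are assumptions; the real node bodies run the middleware-aware model request shown above):

from langgraph.graph import StateGraph, MessagesState
from langgraph.utils.runnable import RunnableCallable  # assumed import path

def model_node(state: MessagesState) -> dict:
    # Stand-in for the sync model request handler.
    return {"messages": []}

async def amodel_node(state: MessagesState) -> dict:
    # Stand-in for the async handler; used automatically on ainvoke().
    return {"messages": []}

graph = StateGraph(MessagesState)
graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))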
@@ -808,27 +808,27 @@ def create_agent( # noqa: PLR0915
 after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
 graph.add_node(f"{m.name}.after_agent", after_agent_node, input_schema=state_schema)

-# Determine the entry node (runs once at start): before_agent -> before_model ->
+# Determine the entry node (runs once at start): before_agent -> before_model -> model
 if middleware_w_before_agent:
 entry_node = f"{middleware_w_before_agent[0].name}.before_agent"
 elif middleware_w_before_model:
 entry_node = f"{middleware_w_before_model[0].name}.before_model"
 else:
-entry_node = "
+entry_node = "model"

 # Determine the loop entry node (beginning of agent loop, excludes before_agent)
 # This is where tools will loop back to for the next iteration
 if middleware_w_before_model:
 loop_entry_node = f"{middleware_w_before_model[0].name}.before_model"
 else:
-loop_entry_node = "
+loop_entry_node = "model"

 # Determine the loop exit node (end of each iteration, can run multiple times)
-# This is after_model or
+# This is after_model or model, but NOT after_agent
 if middleware_w_after_model:
 loop_exit_node = f"{middleware_w_after_model[0].name}.after_model"
 else:
-loop_exit_node = "
+loop_exit_node = "model"

 # Determine the exit node (runs once at end): after_agent or END
 if middleware_w_after_agent:
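Taken together, the renamed fallbacks mean every slot degrades to the "model" node when no middleware claims it. A hypothetical reduction of that selection logic, using plain middleware name strings in place of middleware objects:

def pick_nodes(before_agent: list[str], before_model: list[str], after_model: list[str]) -> dict[str, str]:
    # Mirrors the branching above: first matching middleware wins, "model" is the fallback.
    entry = (
        f"{before_agent[0]}.before_agent" if before_agent
        else f"{before_model[0]}.before_model" if before_model
        else "model"
    )
    loop_entry = f"{before_model[0]}.before_model" if before_model else "model"
    loop_exit = f"{after_model[0]}.after_model" if after_model else "model"
    return {"entry": entry, "loop_entry": loop_entry, "loop_exit": loop_exit}

# With no middleware at all, everything collapses onto the single model node.
assert pick_nodes([], [], []) == {"entry": "model", "loop_entry": "model", "loop_exit": "model"}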
@@ -860,7 +860,7 @@ def create_agent( # noqa: PLR0915
 _make_model_to_model_edge(loop_entry_node, exit_node),
 [loop_entry_node, exit_node],
 )
-elif loop_exit_node == "
+elif loop_exit_node == "model":
 # If no tools and no after_model, go directly to exit_node
 graph.add_edge(loop_exit_node, exit_node)
 # No tools but we have after_model - connect after_model to exit_node
@@ -883,7 +883,7 @@ def create_agent( # noqa: PLR0915
 loop_entry_node,
 can_jump_to=_get_can_jump_to(m1, "before_agent"),
 )
-# Connect last before_agent to loop_entry_node (before_model or
+# Connect last before_agent to loop_entry_node (before_model or model)
 _add_middleware_edge(
 graph,
 f"{middleware_w_before_agent[-1].name}.before_agent",
@@ -902,18 +902,18 @@ def create_agent( # noqa: PLR0915
 loop_entry_node,
 can_jump_to=_get_can_jump_to(m1, "before_model"),
 )
-# Go directly to
+# Go directly to model after the last before_model
 _add_middleware_edge(
 graph,
 f"{middleware_w_before_model[-1].name}.before_model",
-"
+"model",
 loop_entry_node,
 can_jump_to=_get_can_jump_to(middleware_w_before_model[-1], "before_model"),
 )

 # Add after_model middleware edges
 if middleware_w_after_model:
-graph.add_edge("
+graph.add_edge("model", f"{middleware_w_after_model[-1].name}.after_model")
 for idx in range(len(middleware_w_after_model) - 1, 0, -1):
 m1 = middleware_w_after_model[idx]
 m2 = middleware_w_after_model[idx - 1]
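The after_model wiring starts from the renamed "model" node and walks the middleware list backwards; the body of that loop is cut off in this hunk, but if each (m1, m2) pair is connected m1.after_model -> m2.after_model, the resulting edge order looks like this sketch with made-up middleware names:

names = ["logging", "retry", "guardrail"]          # hypothetical middleware names
edges = [("model", f"{names[-1]}.after_model")]    # model feeds the *last* after_model first
for idx in range(len(names) - 1, 0, -1):
    edges.append((f"{names[idx]}.after_model", f"{names[idx - 1]}.after_model"))

print(edges)
# [('model', 'guardrail.after_model'),
#  ('guardrail.after_model', 'retry.after_model'),
#  ('retry.after_model', 'logging.after_model')]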
{langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain
-Version: 1.0.
+Version: 1.0.0a12
 Summary: Building applications with LLMs through composability
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
{langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-langchain/__init__.py,sha256=
+langchain/__init__.py,sha256=r-dYWvMexpt3NwZEBukjX-wFF7i2fNMhSOW4Lry-rnQ,64
 langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/_internal/_documents.py,sha256=uQcKc1cslujQzmP1yiyN6Z371BsOkkrZ4bVidsVvgs0,1044
@@ -7,7 +7,7 @@ langchain/_internal/_prompts.py,sha256=Ldu09Vj04yO9IHssbYps19sw_7uYOm_biXTF8hN6I
 langchain/_internal/_typing.py,sha256=T3Nl8oi_SSZ1YMIxNwZm_6TJsiRAiYYAm-dSznGog84,1656
 langchain/_internal/_utils.py,sha256=lG8X9muiRAWtQjRPudq-1x-wHbk0J3spu_rYZckVdYs,251
 langchain/agents/__init__.py,sha256=x85V7MqddVSrraoirGHplPMzEz9Lha-vL9fKjXCS7lA,258
-langchain/agents/factory.py,sha256=
+langchain/agents/factory.py,sha256=n7TFqCy3iqxFnAcHeFaPKd-RPOCw10q3CmkfC_JJnBA,46906
 langchain/agents/structured_output.py,sha256=msf-ClqDnMfJ-oGHqjwEyth860tMnx58GLTvqJijqg8,13686
 langchain/agents/_internal/__init__.py,sha256=5nNBeaeQIvv9IOQjY4_aNW8pffWzMXQgi0b6Nx-WghM,37
 langchain/agents/_internal/_typing.py,sha256=JoWa-KL5uLNeq6yrm56wnIvhDeFnCt2fTzgUcj5zWy4,270
@@ -37,7 +37,7 @@ langchain/storage/exceptions.py,sha256=Fl_8tON3KmByBKwXtno5WSj0-c2RiZxnhw3gv5aS2
 langchain/storage/in_memory.py,sha256=ozrmu0EtaJJVSAzK_u7nzxWpr9OOscWkANHSg-qIVYQ,369
 langchain/tools/__init__.py,sha256=tWlUqT7jrnf1ouhMctuUkaYBWEuOPD3JQX4Y8uTHk5w,405
 langchain/tools/tool_node.py,sha256=wDJr_LTCN295GrjZi1iKvu_xYj05nbeanqo78jNJaDU,46514
-langchain-1.0.
-langchain-1.0.
-langchain-1.0.
-langchain-1.0.
+langchain-1.0.0a12.dist-info/METADATA,sha256=oKxFoU8_xPZNRc1jm3Y3YLlxdiQ5TbQ_hSDbaol10Ps,5987
+langchain-1.0.0a12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain-1.0.0a12.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-1.0.0a12.dist-info/RECORD,,
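Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped, per the wheel spec. A small sketch for checking a row locally; the path is a placeholder for any file inside an unpacked wheel:

import base64
import hashlib
import pathlib

def record_entry(path: str) -> str:
    # Reproduce the "path,sha256=<urlsafe b64 digest, no padding>,<size>" format.
    data = pathlib.Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

print(record_entry("langchain/agents/factory.py"))  # compare against the RECORD row above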
{langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/WHEEL
File without changes
{langchain-1.0.0a11.dist-info → langchain-1.0.0a12.dist-info}/licenses/LICENSE
File without changes