langchain-dev-utils 1.2.13__py3-none-any.whl → 1.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/agents/middleware/model_router.py +9 -4
- langchain_dev_utils-1.2.15.dist-info/METADATA +102 -0
- {langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/RECORD +6 -6
- langchain_dev_utils-1.2.13.dist-info/METADATA +0 -345
- {langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.2.13"
+__version__ = "1.2.15"
langchain_dev_utils/agents/middleware/model_router.py
CHANGED
@@ -19,6 +19,7 @@ class ModelDict(TypedDict):
     model_description: str
     tools: NotRequired[list[BaseTool | dict[str, Any]]]
     model_kwargs: NotRequired[dict[str, Any]]
+    model_instance: NotRequired[BaseChatModel]
     model_system_prompt: NotRequired[str]
 
 
@@ -76,7 +77,7 @@ class ModelRouterMiddleware(AgentMiddleware):
             model name or a BaseChatModel instance
         model_list: List of available routing models, each containing model_name,
             model_description, tools(Optional), model_kwargs(Optional),
-            model_system_prompt(Optional)
+            model_instance(Optional), model_system_prompt(Optional)
         router_prompt: Routing prompt template, uses default template if None
 
     Examples:
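The docstring hunk above enumerates the keys a model_list entry may carry. For orientation, a hypothetical entry exercising the new model_instance key might look like the sketch below; the ChatOpenAI construction, base_url, and model names are illustrative assumptions, not taken from this package (langchain-openai is pulled in by the 'standard' extra).

```python
# Hypothetical model_list entry for ModelRouterMiddleware.
# The ChatOpenAI instance, base_url, and model names are assumptions for illustration.
from langchain_openai import ChatOpenAI

coder_entry = {
    "model_name": "qwen3-coder",
    "model_description": "Best suited for code generation and debugging questions",
    # New in 1.2.15: a pre-built BaseChatModel. When present, the router uses it
    # directly instead of constructing a model via load_chat_model.
    "model_instance": ChatOpenAI(model="qwen3-coder", base_url="http://localhost:8000/v1"),
    "model_system_prompt": "You are a careful coding assistant.",
}
```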
@@ -155,6 +156,7 @@ class ModelRouterMiddleware(AgentMiddleware):
                 "tools": item.get("tools", None),
                 "kwargs": item.get("model_kwargs", None),
                 "system_prompt": item.get("model_system_prompt", None),
+                "model_instance": item.get("model_instance", None),
             }
             for item in self.model_list
         }
@@ -163,10 +165,13 @@ class ModelRouterMiddleware(AgentMiddleware):
         override_kwargs = {}
         if select_model_name != "default-model" and select_model_name in model_dict:
             model_values = model_dict.get(select_model_name, {})
-            if model_values["kwargs"] is not None:
-                model = load_chat_model(select_model_name, **model_values["kwargs"])
+            if model_values["model_instance"] is not None:
+                model = model_values["model_instance"]
             else:
-                model = load_chat_model(select_model_name)
+                if model_values["kwargs"] is not None:
+                    model = load_chat_model(select_model_name, **model_values["kwargs"])
+                else:
+                    model = load_chat_model(select_model_name)
             override_kwargs["model"] = model
             if model_values["tools"] is not None:
                 override_kwargs["tools"] = model_values["tools"]
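The hunk above checks model_instance first and only falls back to load_chat_model (with or without model_kwargs) when no instance was supplied. A minimal end-to-end sketch of the new path follows; the vllm registration mirrors this package's README, while the middleware's first positional argument (the default model), the export location of ModelRouterMiddleware, and the model names are assumptions inferred from the docstring and code hunks above, not a definitive API reference.

```python
# Hedged sketch: routing to a pre-built model instance with ModelRouterMiddleware.
from langchain_dev_utils.agents import create_agent
from langchain_dev_utils.agents.middleware import ModelRouterMiddleware
from langchain_dev_utils.chat_models import register_model_provider, load_chat_model

# Provider registration as shown in the package README.
register_model_provider("vllm", "openai-compatible", base_url="http://localhost:8000/v1")

# A pre-built instance: as of 1.2.15 the router returns this object as-is.
coder = load_chat_model("vllm:qwen3-coder", temperature=0.0)

router = ModelRouterMiddleware(
    "vllm:qwen3-4b",  # default model; parameter position assumed from the docstring
    model_list=[
        {
            "model_name": "qwen3-coder",
            "model_description": "Best for code generation and debugging",
            "model_instance": coder,  # new in 1.2.15: used directly, no load_chat_model call
        },
        {
            "model_name": "vllm:qwen3-32b",
            "model_description": "Strong general reasoning",
            "model_kwargs": {"temperature": 0.3},  # still built via load_chat_model
        },
    ],
)

agent = create_agent("vllm:qwen3-4b", middleware=[router])
```

When the router selects the first entry, override_kwargs["model"] receives the coder object unchanged; tools and model_system_prompt on an entry are applied exactly as before.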
langchain_dev_utils-1.2.15.dist-info/METADATA
ADDED
@@ -0,0 +1,102 @@
+Metadata-Version: 2.4
+Name: langchain-dev-utils
+Version: 1.2.15
+Summary: A practical utility library for LangChain and LangGraph development
+Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
+Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
+Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils
+Author-email: tiebingice <tiebingice123@outlook.com>
+License-File: LICENSE
+Requires-Python: >=3.11
+Requires-Dist: langchain>=1.1.0
+Requires-Dist: langgraph>=1.0.0
+Provides-Extra: standard
+Requires-Dist: json-repair>=0.53.1; extra == 'standard'
+Requires-Dist: langchain-openai; extra == 'standard'
+Description-Content-Type: text/markdown
+
+# 🦜️🧰 langchain-dev-utils
+
+<p align="center">
+  <em>🚀 High-efficiency toolkit designed for LangChain and LangGraph developers</em>
+</p>
+
+<p align="center">
+  📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
+  <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
+</p>
+
+[](https://pypi.org/project/langchain-dev-utils/)
+[](https://opensource.org/licenses/MIT)
+[](https://www.python.org/downloads)
+[](https://pepy.tech/project/langchain-dev-utils)
+[](https://tbice123123.github.io/langchain-dev-utils)
+
+> This is the English version. For the Chinese version, please visit [中文版本](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
+
+## ✨ Why choose langchain-dev-utils?
+
+Tired of writing repetitive code in LangChain development? `langchain-dev-utils` is the solution you need! This lightweight yet powerful toolkit is designed to enhance the development experience of LangChain and LangGraph, helping you:
+
+- ⚡ **Boost development efficiency** - Reduce boilerplate code so you can focus on core functionality
+- 🧩 **Simplify complex workflows** - Easily manage multi-model, multi-tool, and multi-agent applications
+- 🔧 **Enhance code quality** - Improve consistency and readability, reducing maintenance costs
+- 🎯 **Accelerate prototyping** - Implement ideas quickly, then iterate and validate faster
+
+
+## 🎯 Core Features
+
+- **🔌 Unified model management** - Specify model providers through strings; easily switch and combine different models
+- **💬 Flexible message handling** - Support for chain-of-thought concatenation, streaming processing, and message formatting
+- **🛠️ Powerful tool calling** - Built-in tool-call detection, parameter parsing, and human review functionality
+- **🤖 Efficient agent development** - Simplify the agent creation process and extend it with common middleware
+- **📊 Flexible state graph composition** - Support for serial and parallel composition of multiple StateGraphs
+
+## ⚡ Quick Start
+
+**1. Install `langchain-dev-utils`**
+
+```bash
+pip install -U "langchain-dev-utils[standard]"
+```
+
+**2. Start using**
+
+```python
+from langchain.tools import tool
+from langchain_core.messages import HumanMessage
+from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
+from langchain_dev_utils.agents import create_agent
+
+# Register model provider
+register_model_provider("vllm", "openai-compatible", base_url="http://localhost:8000/v1")
+
+@tool
+def get_current_weather(location: str) -> str:
+    """Get the current weather for the specified location"""
+    return f"25 degrees, {location}"
+
+# Dynamically load model using string
+model = load_chat_model("vllm:qwen3-4b")
+response = model.invoke("Hello")
+print(response)
+
+# Create agent
+agent = create_agent("vllm:qwen3-4b", tools=[get_current_weather])
+response = agent.invoke({"messages": [HumanMessage(content="What's the weather like in New York today?")]})
+print(response)
+```
+
+**For more features of this library, please visit the [full documentation](https://tbice123123.github.io/langchain-dev-utils/)**
+
+
+## 🛠️ GitHub Repository
+
+Visit the [GitHub repository](https://github.com/TBice123123/langchain-dev-utils) to view the source code and report issues.
+
+---
+
+<div align="center">
+  <p>Developed with ❤️ and ☕</p>
+  <p>If this project helps you, please give us a ⭐️</p>
+</div>
{langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-langchain_dev_utils/__init__.py,sha256=
+langchain_dev_utils/__init__.py,sha256=Q6rDLuL8XHKQggYBtRCtxzpPQJgFYWn4x0gcVlH7H4g,23
 langchain_dev_utils/_utils.py,sha256=MFEzR1BjXMj6HEVwt2x2omttFuDJ_rYAEbNqe99r9pM,1338
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=PJ-lSDZv_AXMYA3H4fx-HzJa14tPbkGmq1HX8LNfaPo,125
@@ -9,7 +9,7 @@ langchain_dev_utils/agents/wrap.py,sha256=RuchoH_VotPmKFuYEn2SXoSgNxZhSA9jKM0Iv_
 langchain_dev_utils/agents/middleware/__init__.py,sha256=EECbcYcHXQAMA-guJNRGwCVi9jG957d0nOaoIuyIKC0,832
 langchain_dev_utils/agents/middleware/format_prompt.py,sha256=rfii98tmOqkjaNHxWy7hovhEYKXrF0CdzsMLO54_CDI,2359
 langchain_dev_utils/agents/middleware/model_fallback.py,sha256=nivtXXF4cwyOBv6p7RW12nXtNg87wjTWxO3BKIYiroI,1674
-langchain_dev_utils/agents/middleware/model_router.py,sha256=
+langchain_dev_utils/agents/middleware/model_router.py,sha256=sBXp1D1u_mv9g_JN_RcwKp9DONEY3XMWQVb4nGHPgkw,7925
 langchain_dev_utils/agents/middleware/plan.py,sha256=0qDCmenxgY_zrwMfOyYlgLfhYNw-HszNLeeOkfj14NA,16002
 langchain_dev_utils/agents/middleware/summarization.py,sha256=IoZ2PM1OC3AXwf0DWpfreuPOAipeiYu0KPmAABWXuY0,3087
 langchain_dev_utils/agents/middleware/tool_call_repair.py,sha256=oZF0Oejemqs9kSn8xbW79FWyVVarL4IGCz0gpqYBkFM,3529
@@ -32,7 +32,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
 langchain_dev_utils/tool_calling/utils.py,sha256=S4-KXQ8jWmpGTXYZitovF8rxKpaSSUkFruM8LDwvcvE,2765
-langchain_dev_utils-1.2.13.dist-info/METADATA,sha256=
-langchain_dev_utils-1.2.13.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-langchain_dev_utils-1.2.13.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
-langchain_dev_utils-1.2.13.dist-info/RECORD,,
+langchain_dev_utils-1.2.15.dist-info/METADATA,sha256=f5J-j-1HdBe7bor7rXmgFWaLEXYXci0CpppO8hJaJ2U,4516
+langchain_dev_utils-1.2.15.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_dev_utils-1.2.15.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.2.15.dist-info/RECORD,,
langchain_dev_utils-1.2.13.dist-info/METADATA
DELETED
@@ -1,345 +0,0 @@
-Metadata-Version: 2.4
-Name: langchain-dev-utils
-Version: 1.2.13
-Summary: A practical utility library for LangChain and LangGraph development
-Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
-Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
-Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils
-Author-email: tiebingice <tiebingice123@outlook.com>
-License-File: LICENSE
-Requires-Python: >=3.11
-Requires-Dist: langchain>=1.1.0
-Requires-Dist: langgraph>=1.0.0
-Provides-Extra: standard
-Requires-Dist: json-repair>=0.53.1; extra == 'standard'
-Requires-Dist: langchain-openai; extra == 'standard'
-Description-Content-Type: text/markdown
-
-# 🦜️🧰 langchain-dev-utils
-
-<p align="center">
-  <em>A utility library for LangChain and LangGraph development.</em>
-</p>
-
-<p align="center">
-  📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
-  <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
-</p>
-
-[](https://pypi.org/project/langchain-dev-utils/)
-[](https://opensource.org/licenses/MIT)
-[](https://www.python.org/downloads)
-[](https://pepy.tech/project/langchain-dev-utils)
-[](https://tbice123123.github.io/langchain-dev-utils/)
-
-> This is the English version. For the Chinese version, please visit [中文文档](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
-
-**langchain-dev-utils** is a utility library focused on enhancing the development experience of LangChain and LangGraph. It provides a series of ready-to-use utility functions that reduce repetitive code and improve code consistency and readability. By simplifying the development workflow, this library helps you build prototypes faster, iterate more smoothly, and create clearer and more reliable AI applications based on large language models.
-
-## 🚀 Installation
-
-```bash
-pip install -U langchain-dev-utils
-
-# Install full-featured version:
-pip install -U langchain-dev-utils[standard]
-```
-
-## 📦 Core Features
-
-### 1. **Model Management**
-
-In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but they support a limited number of model providers. This module provides registration functions (`register_model_provider`/`register_embeddings_provider`) to easily register any model provider for later use with `load_chat_model`/`load_embeddings` for model loading.
-
-#### 1.1 Chat Model Management
-
-There are two main functions:
-
-- `register_model_provider`: Register a chat model provider
-- `load_chat_model`: Load a chat model
-
-Assuming you want to use the qwen3-4b model deployed with `vllm`, the reference code is as follows:
-
-```python
-from langchain_dev_utils.chat_models import (
-    register_model_provider,
-    load_chat_model,
-)
-
-# Register model provider
-register_model_provider(
-    provider_name="vllm",
-    chat_model="openai-compatible",
-    base_url="http://localhost:8000/v1",
-)
-
-# Load model
-model = load_chat_model("vllm:qwen3-4b")
-print(model.invoke("Hello"))
-```
-
-#### 1.2 Embedding Model Management
-
-There are two main functions:
-
-- `register_embeddings_provider`: Register an embedding model provider
-- `load_embeddings`: Load an embedding model
-
-Assuming you want to use the qwen3-embedding-4b model deployed with `vllm`, the reference code is as follows:
-
-```python
-from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
-
-# Register embedding model provider
-register_embeddings_provider(
-    provider_name="vllm",
-    embeddings_model="openai-compatible",
-    base_url="http://localhost:8000/v1",
-)
-
-# Load embedding model
-embeddings = load_embeddings("vllm:qwen3-embedding-4b")
-emb = embeddings.embed_query("Hello")
-print(emb)
-```
-
-
-### 2. **Message Conversion**
-
-Includes the following features:
-
-- Merge chain-of-thought content into final responses
-- Stream content merging
-- Content formatting tools
-
-#### 2.1 Stream Content Merging
-
-For streaming responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
-
-```python
-from langchain_dev_utils.message_convert import merge_ai_message_chunk
-chunks = list(model.stream("Hello"))
-merged = merge_ai_message_chunk(chunks)
-```
-
-#### 2.2 Format List Content
-
-For a list, you can use `format_sequence` to format it.
-
-```python
-from langchain_dev_utils.message_convert import format_sequence
-text = format_sequence([
-    "str1",
-    "str2",
-    "str3"
-], separator="\n", with_num=True)
-```
-
-
-### 3. **Tool Calling**
-
-Includes the following features:
-
-- Check and parse tool calls
-- Add human-in-the-loop functionality
-
-#### 3.1 Check and Parse Tool Calls
-
-`has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
-
-```python
-import datetime
-from langchain_core.tools import tool
-from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
-
-@tool
-def get_current_time() -> str:
-    """Get current timestamp"""
-    return str(datetime.datetime.now().timestamp())
-
-response = model.bind_tools([get_current_time]).invoke("What time is it now?")
-
-if has_tool_calling(response):
-    name, args = parse_tool_calling(
-        response, first_tool_call_only=True
-    )
-    print(name, args)
-```
-
-#### 3.2 Add Human-in-the-Loop Functionality
-
-- `human_in_the_loop`: For synchronous tool functions
-- `human_in_the_loop_async`: For asynchronous tool functions
-
-Both can accept a `handler` parameter for custom breakpoint return and response handling logic.
-
-```python
-from langchain_dev_utils.tool_calling import human_in_the_loop
-from langchain_core.tools import tool
-import datetime
-
-@human_in_the_loop
-@tool
-def get_current_time() -> str:
-    """Get current timestamp"""
-    return str(datetime.datetime.now().timestamp())
-```
-
-
-### 4. **Agent Development**
-
-Includes the following features:
-
-- Multi-agent construction
-- Common middleware components
-
-#### 4.1 Multi-Agent Construction
-
-Wrapping agents as tools is a common implementation pattern in multi-agent systems, which is detailed in the official LangChain documentation. To this end, this library provides a pre-built function `wrap_agent_as_tool` to implement this pattern, which can wrap an agent instance into a tool that can be called by other agents.
-
-Usage example:
-
-```python
-import datetime
-from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
-from langchain.agents import AgentState
-
-@tool
-def get_current_time() -> str:
-    """Get current time"""
-    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-time_agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-call_time_agent_tool = wrap_agent_as_tool(time_agent)
-
-agent = create_agent(
-    "vllm:qwen3-4b",
-    name="agent",
-    tools=[call_time_agent_tool],
-)
-response = agent.invoke(
-    {"messages": [{"role": "user", "content": "What time is it now?"}]}
-)
-print(response)
-```
-
-#### 4.2 Middleware
-
-Provides some common middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
-
-`ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` content from large models.
-
-`PlanMiddleware` is used for agent planning.
-
-```python
-from langchain_dev_utils.agents.middleware import (
-    ToolCallRepairMiddleware,
-    PlanMiddleware,
-)
-
-agent = create_agent(
-    "vllm:qwen3-4b",
-    name="plan-agent",
-    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
-        use_read_plan_tool=False
-    )]
-)
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
-print(response)
-```
-
-
-### 5. **State Graph Orchestration**
-
-Includes the following features:
-
-- Sequential graph orchestration
-- Parallel graph orchestration
-
-#### 5.1 Sequential Graph Orchestration
-
-Using `create_sequential_pipeline`, you can orchestrate multiple subgraphs in sequence:
-
-```python
-from langchain.agents import AgentState
-from langchain_core.messages import HumanMessage
-from langchain_dev_utils.agents import create_agent
-from langchain_dev_utils.pipeline import create_sequential_pipeline
-from langchain_dev_utils.chat_models import register_model_provider
-
-register_model_provider(
-    provider_name="vllm",
-    chat_model="openai-compatible",
-    base_url="http://localhost:8000/v1",
-)
-
-# Build sequential pipeline (all subgraphs execute in sequence)
-graph = create_sequential_pipeline(
-    sub_graphs=[
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_time],
-            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
-            name="time_agent",
-        ),
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_weather],
-            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
-            name="weather_agent",
-        ),
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_user],
-            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
-            name="user_agent",
-        ),
-    ],
-    state_schema=AgentState,
-)
-
-response = graph.invoke({"messages": [HumanMessage("Hello")]})
-print(response)
-```
-
-#### 5.2 Parallel Graph Orchestration
-
-Using `create_parallel_pipeline`, you can orchestrate multiple subgraphs in parallel:
-
-```python
-from langchain_dev_utils.pipeline import create_parallel_pipeline
-
-# Build parallel pipeline (all subgraphs execute in parallel)
-graph = create_parallel_pipeline(
-    sub_graphs=[
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_time],
-            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
-            name="time_agent",
-        ),
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_weather],
-            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
-            name="weather_agent",
-        ),
-        create_agent(
-            model="vllm:qwen3-4b",
-            tools=[get_current_user],
-            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
-            name="user_agent",
-        ),
-    ],
-    state_schema=AgentState,
-)
-response = graph.invoke({"messages": [HumanMessage("Hello")]})
-print(response)
-```
-
-
-## 💬 Join the Community
-
-- [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
-- [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
-- We welcome all forms of contributions — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together
{langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/WHEEL
RENAMED
File without changes

{langchain_dev_utils-1.2.13.dist-info → langchain_dev_utils-1.2.15.dist-info}/licenses/LICENSE
RENAMED
File without changes