langchain-dev-utils 1.2.10__tar.gz → 1.2.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/.gitignore +4 -1
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/PKG-INFO +44 -54
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/README.md +43 -53
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/README_cn.md +3 -8
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/pyproject.toml +6 -2
- langchain_dev_utils-1.2.12/src/langchain_dev_utils/__init__.py +1 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/__init__.py +2 -0
- langchain_dev_utils-1.2.12/src/langchain_dev_utils/agents/middleware/format_prompt.py +66 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/chat_models/base.py +3 -10
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/chat_models/types.py +9 -1
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/uv.lock +341 -1
- langchain_dev_utils-1.2.10/src/langchain_dev_utils/__init__.py +0 -1
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/.python-version +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/.vscode/settings.json +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/LICENSE +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/_utils.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/factory.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/file_system.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/plan.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/agents/wrap.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/embeddings/base.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/message_convert/content.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/message_convert/format.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/pipeline/types.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/py.typed +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/__init__.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_agent.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_chat_models.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_human_in_the_loop.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_load_embbeding.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_load_model.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_messages.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_model_tool_emulator.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_pipline.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_plan_middleware.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_router_model.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_tool_call_repair.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_tool_calling.py +0 -0
- {langchain_dev_utils-1.2.10 → langchain_dev_utils-1.2.12}/tests/test_wrap_agent.py +0 -0
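Code-wise, the release adds one new module, `agents/middleware/format_prompt.py` (66 lines), exports it through the two-line change to `agents/middleware/__init__.py` and the one-line change to the package `__init__.py`, and reworks `chat_models/base.py` and `chat_models/types.py`. The new module's contents are not included in this diff; as a rough sketch of what prompt formatting usually means in this context (the helper name, signature, and template style below are all assumptions, not the package's actual code):

```python
from typing import Any


def format_prompt(template: str, state: dict[str, Any]) -> str:
    """Hypothetical helper: render a {placeholder}-style prompt template
    from values carried in the agent state."""
    return template.format(**state)


# Usage sketch
state = {"user_name": "Alice", "today": "2026-01-01"}
print(format_prompt("You are assisting {user_name}. Today is {today}.", state))
```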
````diff
--- langchain_dev_utils-1.2.10/PKG-INFO
+++ langchain_dev_utils-1.2.12/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.10
+Version: 1.2.12
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
````
````diff
@@ -22,26 +22,26 @@ Description-Content-Type: text/markdown
 </p>
 
 <p align="center">
-📚 <a href="https://tbice123123.github.io/langchain-dev-utils
-<a href="https://tbice123123.github.io/langchain-dev-utils
+📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
+<a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
 </p>
 
 [](https://pypi.org/project/langchain-dev-utils/)
 [](https://opensource.org/licenses/MIT)
 [](https://www.python.org/downloads)
 [](https://pepy.tech/project/langchain-dev-utils)
-[](https://tbice123123.github.io/langchain-dev-utils
+[](https://tbice123123.github.io/langchain-dev-utils/)
 
-> This is the English version. For the Chinese version, please visit [
+> This is the English version. For the Chinese version, please visit [中文文档](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
 
-**langchain-dev-utils** is a utility library focused on enhancing the development experience
+**langchain-dev-utils** is a utility library focused on enhancing the development experience of LangChain and LangGraph. It provides a set of ready-to-use utility functions that reduce repetitive code and improve code consistency and readability. By streamlining the development workflow, it helps you build prototypes faster, iterate more smoothly, and create clearer, more reliable AI applications based on large language models.
 
 ## 🚀 Installation
 
 ```bash
 pip install -U langchain-dev-utils
 
-# Install
+# Install the full-featured version:
 pip install -U langchain-dev-utils[standard]
 ```
 
````
````diff
@@ -49,16 +49,16 @@ pip install -U langchain-dev-utils[standard]
 
 ### 1. **Model Management**
 
-In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but
+In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model and embedding model instances, but they support only a limited set of model providers. This module provides registration functions (`register_model_provider`/`register_embeddings_provider`) that let you register any model provider and then load it with `load_chat_model`/`load_embeddings`.
 
 #### 1.1 Chat Model Management
 
-
+There are two main functions:
 
 - `register_model_provider`: Register a chat model provider
 - `load_chat_model`: Load a chat model
 
-
+Assuming you want to use the qwen3-4b model deployed with `vllm`, the reference code is as follows:
 
 ```python
 from langchain_dev_utils.chat_models import (
````
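The hunk above ends at the opening import. For orientation, the registration flow this section describes looks roughly like the sketch below; only the names visible in this diff (`register_model_provider`, `load_chat_model`, the `vllm:qwen3-4b` model string, `base_url="http://localhost:8000/v1"`, and `print(model.invoke("Hello"))`) are confirmed, while the exact positional arguments of `register_model_provider` are an assumption:

```python
from langchain_dev_utils.chat_models import (
    register_model_provider,
    load_chat_model,
)

# Register a provider named "vllm" backed by a local OpenAI-compatible
# vLLM server (argument layout assumed; see the project docs for the real one).
register_model_provider(
    "vllm",
    "openai-compatible",
    base_url="http://localhost:8000/v1",
)

# Load the registered model with a "provider:model" string and invoke it.
model = load_chat_model("vllm:qwen3-4b")
print(model.invoke("Hello"))
```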
````diff
@@ -80,12 +80,12 @@ print(model.invoke("Hello"))
 
 #### 1.2 Embedding Model Management
 
-
+There are two main functions:
 
 - `register_embeddings_provider`: Register an embedding model provider
 - `load_embeddings`: Load an embedding model
 
-
+Assuming you want to use the qwen3-embedding-4b model deployed with `vllm`, the reference code is as follows:
 
 ```python
 from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
````
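Likewise, only the import line and the `embed_query("Hello")`/`print(emb)` usage survive in this diff's context; a sketch of the full flow, with the registration arguments assumed by analogy with the chat-model example:

```python
from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings

# Argument layout assumed by analogy with register_model_provider.
register_embeddings_provider(
    "vllm",
    "openai-compatible",
    base_url="http://localhost:8000/v1",
)

embeddings = load_embeddings("vllm:qwen3-embedding-4b")
emb = embeddings.embed_query("Hello")
print(emb)
```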
````diff
@@ -103,23 +103,21 @@ emb = embeddings.embed_query("Hello")
 print(emb)
 ```
 
-**For more information about model management, please refer to**: [Chat Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/chat.html), [Embedding Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/embedding.html)
 
 ### 2. **Message Conversion**
 
 Includes the following features:
 
-- Merge
+- Merge chain-of-thought content into final responses
 - Stream content merging
 - Content formatting tools
 
 #### 2.1 Stream Content Merging
 
-For
+For streaming responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final `AIMessage`.
 
 ```python
 from langchain_dev_utils.message_convert import merge_ai_message_chunk
-
 chunks = list(model.stream("Hello"))
 merged = merge_ai_message_chunk(chunks)
 ```
````
````diff
@@ -137,7 +135,6 @@ text = format_sequence([
 ], separator="\n", with_num=True)
 ```
 
-**For more information about message conversion, please refer to**: [Message Process](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/message.html), [Formatting List Content](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/format.html)
 
 ### 3. **Tool Calling**
 
````
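The hunk header shows the call shape, `text = format_sequence([...], separator="\n", with_num=True)`, but not the list items or the import. A self-contained sketch (the import path is inferred from `message_convert/format.py` in the file list, and the list contents are invented for illustration):

```python
from langchain_dev_utils.message_convert import format_sequence  # path assumed

# with_num=True presumably numbers each item; separator joins the results.
text = format_sequence(
    ["collect requirements", "draft the itinerary", "review the draft"],
    separator="\n",
    with_num=True,
)
print(text)
```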
````diff
@@ -157,10 +154,10 @@ from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
 
 @tool
 def get_current_time() -> str:
-    """Get
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())
 
-response = model.bind_tools([get_current_time]).invoke("What time is it?")
+response = model.bind_tools([get_current_time]).invoke("What time is it now?")
 
 if has_tool_calling(response):
     name, args = parse_tool_calling(
````
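The `parse_tool_calling(` call is cut off at the hunk boundary. A plausible completion, assuming the function takes the AI message and an option to return only the first tool call (the keyword name is a guess, not the confirmed signature):

```python
from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

if has_tool_calling(response):
    # Keyword argument is a guess; only "parse_tool_calling(" survives in the hunk.
    name, args = parse_tool_calling(response, first_tool_call_only=True)
    print(name, args)
```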
````diff
@@ -184,24 +181,23 @@ import datetime
 @human_in_the_loop
 @tool
 def get_current_time() -> str:
-    """Get
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())
 ```
 
-**For more information about tool calling, please refer to**: [Add Human-in-the-Loop Support](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/human-in-the-loop.html), [Tool Call Handling](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/tool.html)
 
 ### 4. **Agent Development**
 
-Includes the following
+Includes the following features:
 
-- Multi-agent construction
--
+- Multi-agent construction
+- Common middleware components
 
 #### 4.1 Multi-Agent Construction
 
-Wrapping
+Wrapping agents as tools is a common implementation pattern in multi-agent systems and is described in detail in the official LangChain documentation. This library provides a pre-built function, `wrap_agent_as_tool`, that wraps an agent instance into a tool that other agents can call.
 
-
+Usage example:
 
 ```python
 import datetime
````
````diff
@@ -210,7 +206,7 @@ from langchain.agents import AgentState
 
 @tool
 def get_current_time() -> str:
-    """Get
+    """Get current time"""
     return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
 time_agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
````
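With `time_agent` from this hunk's context, the wrapping pattern the prose introduces would look roughly like the following (the import path is inferred from `agents/wrap.py` in the file list; the exact `wrap_agent_as_tool` signature is an assumption):

```python
from langchain.agents import create_agent
from langchain_dev_utils.agents import wrap_agent_as_tool  # path assumed

call_time_agent = wrap_agent_as_tool(time_agent)  # signature assumed

# The wrapped sub-agent becomes an ordinary tool for a parent agent.
main_agent = create_agent("vllm:qwen3-4b", tools=[call_time_agent], name="main-agent")
response = main_agent.invoke({"messages": [{"role": "user", "content": "What time is it now?"}]})
print(response)
```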
````diff
@@ -229,10 +225,11 @@ print(response)
 
 #### 4.2 Middleware
 
-Provides
+Provides some common middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
 
-
-
+`ToolCallRepairMiddleware` is used to repair the `invalid_tool_calls` content produced by large models.
+
+`PlanMiddleware` is used for agent planning.
 
 ```python
 from langchain_dev_utils.agents.middleware import (
````
````diff
@@ -240,33 +237,28 @@ from langchain_dev_utils.agents.middleware import (
     PlanMiddleware,
 )
 
-agent
+agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[
-
-
-    ]
+    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+        use_read_plan_tool=False
+    )]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
 print(response)
 ```
 
-**For more details on agent development and a complete list of built-in middleware, please refer to**:
-[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
-[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
-
 
 ### 5. **State Graph Orchestration**
 
-Includes the following
+Includes the following features:
 
 - Sequential graph orchestration
 - Parallel graph orchestration
 
 #### 5.1 Sequential Graph Orchestration
 
-
+Using `create_sequential_pipeline`, you can orchestrate multiple subgraphs in sequence:
 
 ```python
 from langchain.agents import AgentState
````
````diff
@@ -281,25 +273,25 @@ register_model_provider(
     base_url="http://localhost:8000/v1",
 )
 
-# Build
+# Build sequential pipeline (all subgraphs execute in sequence)
 graph = create_sequential_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time
+            system_prompt="You are a time query assistant; you can only answer questions about the current time. If the question is not related to time, reply directly that you cannot answer it.",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather
+            system_prompt="You are a weather query assistant; you can only answer questions about the current weather. If the question is not related to weather, reply directly that you cannot answer it.",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user
+            system_prompt="You are a user query assistant; you can only answer questions about the current user. If the question is not related to users, reply directly that you cannot answer it.",
             name="user_agent",
         ),
     ],
````
````diff
@@ -312,44 +304,42 @@ print(response)
 
 #### 5.2 Parallel Graph Orchestration
 
-
+Using `create_parallel_pipeline`, you can orchestrate multiple subgraphs in parallel:
 
 ```python
 from langchain_dev_utils.pipeline import create_parallel_pipeline
 
-# Build
+# Build parallel pipeline (all subgraphs execute in parallel)
 graph = create_parallel_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time
+            system_prompt="You are a time query assistant; you can only answer questions about the current time. If the question is not related to time, reply directly that you cannot answer it.",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather
+            system_prompt="You are a weather query assistant; you can only answer questions about the current weather. If the question is not related to weather, reply directly that you cannot answer it.",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user
+            system_prompt="You are a user query assistant; you can only answer questions about the current user. If the question is not related to users, reply directly that you cannot answer it.",
             name="user_agent",
         ),
     ],
     state_schema=AgentState,
 )
-
 response = graph.invoke({"messages": [HumanMessage("Hello")]})
 print(response)
 ```
 
-**For more information about state graph orchestration, please refer to**: [State Graph Orchestration](https://tbice123123.github.io/langchain-dev-utils-docs/en/graph-orchestration/pipeline.html)
 
 ## 💬 Join the Community
 
 - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
 - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
-- We welcome
+- We welcome all forms of contributions — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together.
````
The remaining hunks diff README.md (+43 -53), which duplicates the README body embedded in PKG-INFO; they repeat the changes shown above verbatim, with each hunk offset by the 17-line metadata header (for example, `@@ -5,26 +5,26 @@` in README.md corresponds to `@@ -22,26 +22,26 @@` in PKG-INFO).