langchain-dev-utils 1.2.9__tar.gz → 1.2.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/.gitignore +4 -1
  2. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/PKG-INFO +52 -58
  3. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/README.md +51 -57
  4. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/README_cn.md +11 -12
  5. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/pyproject.toml +6 -2
  6. langchain_dev_utils-1.2.11/src/langchain_dev_utils/__init__.py +1 -0
  7. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +584 -572
  8. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/chat_models/base.py +3 -10
  9. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/chat_models/types.py +9 -1
  10. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_load_model.py +1 -1
  11. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/uv.lock +341 -1
  12. langchain_dev_utils-1.2.9/src/langchain_dev_utils/__init__.py +0 -1
  13. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/.python-version +0 -0
  14. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/.vscode/settings.json +0 -0
  15. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/LICENSE +0 -0
  16. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/_utils.py +0 -0
  17. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/__init__.py +0 -0
  18. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/factory.py +0 -0
  19. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/file_system.py +0 -0
  20. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  21. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  22. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  23. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  24. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  25. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
  26. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  27. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  28. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/plan.py +0 -0
  29. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/agents/wrap.py +0 -0
  30. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  31. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  32. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  33. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/embeddings/base.py +0 -0
  34. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  35. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/message_convert/content.py +0 -0
  36. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/message_convert/format.py +0 -0
  37. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  38. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  39. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  40. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/pipeline/types.py +0 -0
  41. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/py.typed +0 -0
  42. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  43. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  44. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  45. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/__init__.py +0 -0
  46. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_agent.py +0 -0
  47. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_chat_models.py +0 -0
  48. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_human_in_the_loop.py +0 -0
  49. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_load_embbeding.py +0 -0
  50. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_messages.py +0 -0
  51. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_model_tool_emulator.py +0 -0
  52. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_pipline.py +0 -0
  53. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_plan_middleware.py +0 -0
  54. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_router_model.py +0 -0
  55. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_tool_call_repair.py +0 -0
  56. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_tool_calling.py +0 -0
  57. {langchain_dev_utils-1.2.9 → langchain_dev_utils-1.2.11}/tests/test_wrap_agent.py +0 -0
--- langchain_dev_utils-1.2.9/.gitignore
+++ langchain_dev_utils-1.2.11/.gitignore
@@ -10,4 +10,7 @@ wheels/
 .venv
 .env
 .benchmarks
-data/
+data/
+node_modules
+dist
+site/
--- langchain_dev_utils-1.2.9/PKG-INFO
+++ langchain_dev_utils-1.2.11/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.9
+Version: 1.2.11
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -22,26 +22,26 @@ Description-Content-Type: text/markdown
 </p>

 <p align="center">
-  📚 <a href="https://tbice123123.github.io/langchain-dev-utils-docs/en/">English</a> •
-  <a href="https://tbice123123.github.io/langchain-dev-utils-docs/zh/">中文</a>
+  📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
+  <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
 </p>

 [![PyPI](https://img.shields.io/pypi/v/langchain-dev-utils.svg?color=%2334D058&label=pypi%20package)](https://pypi.org/project/langchain-dev-utils/)
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
 [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
-[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils-docs/en/)
+[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils/)

-> This is the English version. For the Chinese version, please visit [Chinese Documentation](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
+> This is the English version. For the Chinese version, please visit [中文文档](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)

-**langchain-dev-utils** is a utility library focused on enhancing the development experience with LangChain and LangGraph. It provides a series of out-of-the-box utility functions that can both reduce repetitive code writing and improve code consistency and readability. By simplifying development workflows, this library helps you prototype faster, iterate more smoothly, and create clearer, more reliable LLM-based AI applications.
+**langchain-dev-utils** is a utility library focused on enhancing the development experience of LangChain and LangGraph. It provides a series of ready-to-use utility functions that can reduce repetitive code writing and improve code consistency and readability. By simplifying the development workflow, this library can help you build prototypes faster, iterate more smoothly, and create clearer and more reliable AI applications based on large language models.

 ## 🚀 Installation

 ```bash
 pip install -U langchain-dev-utils

-# Install the full-featured version:
+# Install full-featured version:
 pip install -U langchain-dev-utils[standard]
 ```

@@ -49,16 +49,16 @@ pip install -U langchain-dev-utils[standard]

 ### 1. **Model Management**

-In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but the model providers they support are relatively limited. This module provides a registration function (`register_model_provider`/`register_embeddings_provider`) to register any model provider for subsequent model loading using `load_chat_model` / `load_embeddings`.
+In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but they support a limited number of model providers. This module provides registration functions (`register_model_provider`/`register_embeddings_provider`) to easily register any model provider for later use with `load_chat_model` / `load_embeddings` for model loading.

 #### 1.1 Chat Model Management

-Mainly consists of the following two functions:
+There are two main functions:

 - `register_model_provider`: Register a chat model provider
 - `load_chat_model`: Load a chat model

-Example for integrating a qwen3-4b model deployed using `vllm`:
+Assuming you want to use the qwen3-4b model deployed with `vllm`, the reference code is as follows:

 ```python
 from langchain_dev_utils.chat_models import (
@@ -80,12 +80,12 @@ print(model.invoke("Hello"))

 #### 1.2 Embedding Model Management

-Mainly consists of the following two functions:
+There are two main functions:

 - `register_embeddings_provider`: Register an embedding model provider
 - `load_embeddings`: Load an embedding model

-Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
+Assuming you want to use the qwen3-embedding-4b model deployed with `vllm`, the reference code is as follows:

 ```python
 from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
@@ -103,23 +103,21 @@ emb = embeddings.embed_query("Hello")
 print(emb)
 ```

-**For more information about model management, please refer to**: [Chat Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/chat.html), [Embedding Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/embedding.html)

 ### 2. **Message Conversion**

 Includes the following features:

-- Merge reasoning content into the final response
+- Merge chain-of-thought content into final responses
 - Stream content merging
 - Content formatting tools

 #### 2.1 Stream Content Merging

-For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
+For streaming responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

 ```python
 from langchain_dev_utils.message_convert import merge_ai_message_chunk
-
 chunks = list(model.stream("Hello"))
 merged = merge_ai_message_chunk(chunks)
 ```
@@ -137,7 +135,6 @@ text = format_sequence([
 ], separator="\n", with_num=True)
 ```

-**For more information about message conversion, please refer to**: [Message Process](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/message.html), [Formatting List Content](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/format.html)

 ### 3. **Tool Calling**

@@ -157,10 +154,10 @@ from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

 @tool
 def get_current_time() -> str:
-    """Get the current timestamp"""
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())

-response = model.bind_tools([get_current_time]).invoke("What time is it?")
+response = model.bind_tools([get_current_time]).invoke("What time is it now?")

 if has_tool_calling(response):
     name, args = parse_tool_calling(
@@ -184,40 +181,43 @@ import datetime
 @human_in_the_loop
 @tool
 def get_current_time() -> str:
-    """Get the current timestamp"""
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())
 ```

-**For more information about tool calling, please refer to**: [Add Human-in-the-Loop Support](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/human-in-the-loop.html), [Tool Call Handling](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/tool.html)

 ### 4. **Agent Development**

-Includes the following capabilities:
+Includes the following features:

-- Multi-agent construction
-- Commonly used middleware components
+- Multi-agent construction
+- Common middleware components

 #### 4.1 Multi-Agent Construction

-Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To support this pattern, this library provides a pre-built utility function `wrap_agent_as_tool`, which encapsulates an agent instance into a tool that can be invoked by other agents.
+Wrapping agents as tools is a common implementation pattern in multi-agent systems, which is detailed in the official LangChain documentation. To this end, this library provides a pre-built function `wrap_agent_as_tool` to implement this pattern, which can wrap an agent instance into a tool that can be called by other agents.

-**Usage Example**:
+Usage example:

 ```python
 import datetime
 from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState

-
 @tool
 def get_current_time() -> str:
-    """Get the current time."""
+    """Get current time"""
     return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

+time_agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
+call_time_agent_tool = wrap_agent_as_tool(time_agent)

-agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-call_time_agent_tool = wrap_agent_as_tool(agent)
-response = call_time_agent_tool.invoke(
+agent = create_agent(
+    "vllm:qwen3-4b",
+    name="agent",
+    tools=[call_time_agent_tool],
+)
+response = agent.invoke(
     {"messages": [{"role": "user", "content": "What time is it now?"}]}
 )
 print(response)
@@ -225,10 +225,11 @@ print(response)

 #### 4.2 Middleware

-Provides several commonly used middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
+Provides some common middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.

-- `ToolCallRepairMiddleware` automatically repairs malformed tool calls found in the model's `invalid_tool_calls` output.
-- `PlanMiddleware` enables task planning capabilities for agents.
+`ToolCallRepairMiddleware` is used to fix the `invalid_tool_calls` content returned by large models.
+
+`PlanMiddleware` is used for agent planning.

 ```python
 from langchain_dev_utils.agents.middleware import (
@@ -236,33 +237,28 @@ from langchain_dev_utils.agents.middleware import (
     PlanMiddleware,
 )

 agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[
-        ToolCallRepairMiddleware(),
-        PlanMiddleware(use_read_plan_tool=False)
-    ]
+    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+        use_read_plan_tool=False
+    )]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan for visiting New York."}]})
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
 print(response)
 ```

-**For more details on agent development and a complete list of built-in middleware, please refer to**:
-[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
-[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
-

 ### 5. **State Graph Orchestration**

-Includes the following capabilities:
+Includes the following features:

 - Sequential graph orchestration
 - Parallel graph orchestration

 #### 5.1 Sequential Graph Orchestration

-Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:
+Using `create_sequential_pipeline`, you can orchestrate multiple subgraphs in sequence:

 ```python
 from langchain.agents import AgentState
@@ -277,25 +273,25 @@ register_model_provider(
     base_url="http://localhost:8000/v1",
 )

-# Build a sequential pipeline (all subgraphs executed in order)
+# Build sequential pipeline (all subgraphs execute in sequence)
 graph = create_sequential_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
+            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
+            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
+            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
             name="user_agent",
         ),
     ],
@@ -308,44 +304,42 @@ print(response)

 #### 5.2 Parallel Graph Orchestration

-Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:
+Using `create_parallel_pipeline`, you can orchestrate multiple subgraphs in parallel:

 ```python
 from langchain_dev_utils.pipeline import create_parallel_pipeline

-# Build a parallel pipeline (all subgraphs executed concurrently)
+# Build parallel pipeline (all subgraphs execute in parallel)
 graph = create_parallel_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
+            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
+            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
+            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
             name="user_agent",
         ),
     ],
     state_schema=AgentState,
 )
-
 response = graph.invoke({"messages": [HumanMessage("Hello")]})
 print(response)
 ```

-**For more information about state graph orchestration, please refer to**: [State Graph Orchestration](https://tbice123123.github.io/langchain-dev-utils-docs/en/graph-orchestration/pipeline.html)

 ## 💬 Join the Community

 - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
 - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
-- We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+- We welcome all forms of contributions — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together
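
That closes the PKG-INFO hunks; the README.md hunks that follow repeat the same edits, since PKG-INFO embeds the README body. The substance of these changes is the model-management walkthrough built around `register_model_provider` and `load_chat_model`. As a consolidated reference, here is that flow assembled into one minimal, runnable sketch from the excerpts in this diff. It is not taken from the package source: only the `base_url` keyword appears verbatim in the diff's `register_model_provider(...)` call, so the provider-name argument is inferred from the `"vllm:qwen3-4b"` strings and the second argument is an outright guess.

```python
# Sketch assembled from the README excerpts in this diff, not from the
# package source; assumes a local vllm server at http://localhost:8000/v1
# serving qwen3-4b, as in the package's own examples.
from langchain_dev_utils.chat_models import (
    load_chat_model,
    register_model_provider,
)

register_model_provider(
    "vllm",                               # inferred: "vllm:qwen3-4b" implies a provider named "vllm"
    "openai",                             # guessed backing implementation; not visible in this diff
    base_url="http://localhost:8000/v1",  # shown verbatim in the diff
)

# Registered providers are addressed as "provider:model" strings,
# per the examples above.
model = load_chat_model("vllm:qwen3-4b")
print(model.invoke("Hello"))
```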
--- langchain_dev_utils-1.2.9/README.md
+++ langchain_dev_utils-1.2.11/README.md
@@ -5,26 +5,26 @@
 </p>

 <p align="center">
-  📚 <a href="https://tbice123123.github.io/langchain-dev-utils-docs/en/">English</a> •
-  <a href="https://tbice123123.github.io/langchain-dev-utils-docs/zh/">中文</a>
+  📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
+  <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
 </p>

 [![PyPI](https://img.shields.io/pypi/v/langchain-dev-utils.svg?color=%2334D058&label=pypi%20package)](https://pypi.org/project/langchain-dev-utils/)
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
 [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
-[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils-docs/en/)
+[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils/)

-> This is the English version. For the Chinese version, please visit [Chinese Documentation](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
+> This is the English version. For the Chinese version, please visit [中文文档](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)

-**langchain-dev-utils** is a utility library focused on enhancing the development experience with LangChain and LangGraph. It provides a series of out-of-the-box utility functions that can both reduce repetitive code writing and improve code consistency and readability. By simplifying development workflows, this library helps you prototype faster, iterate more smoothly, and create clearer, more reliable LLM-based AI applications.
+**langchain-dev-utils** is a utility library focused on enhancing the development experience of LangChain and LangGraph. It provides a series of ready-to-use utility functions that can reduce repetitive code writing and improve code consistency and readability. By simplifying the development workflow, this library can help you build prototypes faster, iterate more smoothly, and create clearer and more reliable AI applications based on large language models.

 ## 🚀 Installation

 ```bash
 pip install -U langchain-dev-utils

-# Install the full-featured version:
+# Install full-featured version:
 pip install -U langchain-dev-utils[standard]
 ```

@@ -32,16 +32,16 @@ pip install -U langchain-dev-utils[standard]

 ### 1. **Model Management**

-In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but the model providers they support are relatively limited. This module provides a registration function (`register_model_provider`/`register_embeddings_provider`) to register any model provider for subsequent model loading using `load_chat_model` / `load_embeddings`.
+In `langchain`, the `init_chat_model`/`init_embeddings` functions can be used to initialize chat model instances/embedding model instances, but they support a limited number of model providers. This module provides registration functions (`register_model_provider`/`register_embeddings_provider`) to easily register any model provider for later use with `load_chat_model` / `load_embeddings` for model loading.

 #### 1.1 Chat Model Management

-Mainly consists of the following two functions:
+There are two main functions:

 - `register_model_provider`: Register a chat model provider
 - `load_chat_model`: Load a chat model

-Example for integrating a qwen3-4b model deployed using `vllm`:
+Assuming you want to use the qwen3-4b model deployed with `vllm`, the reference code is as follows:

 ```python
 from langchain_dev_utils.chat_models import (
@@ -63,12 +63,12 @@ print(model.invoke("Hello"))

 #### 1.2 Embedding Model Management

-Mainly consists of the following two functions:
+There are two main functions:

 - `register_embeddings_provider`: Register an embedding model provider
 - `load_embeddings`: Load an embedding model

-Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
+Assuming you want to use the qwen3-embedding-4b model deployed with `vllm`, the reference code is as follows:

 ```python
 from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
@@ -86,23 +86,21 @@ emb = embeddings.embed_query("Hello")
 print(emb)
 ```

-**For more information about model management, please refer to**: [Chat Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/chat.html), [Embedding Model Management](https://tbice123123.github.io/langchain-dev-utils-docs/en/model-management/embedding.html)

 ### 2. **Message Conversion**

 Includes the following features:

-- Merge reasoning content into the final response
+- Merge chain-of-thought content into final responses
 - Stream content merging
 - Content formatting tools

 #### 2.1 Stream Content Merging

-For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
+For streaming responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

 ```python
 from langchain_dev_utils.message_convert import merge_ai_message_chunk
-
 chunks = list(model.stream("Hello"))
 merged = merge_ai_message_chunk(chunks)
 ```
@@ -120,7 +118,6 @@ text = format_sequence([
 ], separator="\n", with_num=True)
 ```

-**For more information about message conversion, please refer to**: [Message Process](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/message.html), [Formatting List Content](https://tbice123123.github.io/langchain-dev-utils-docs/en/message-conversion/format.html)

 ### 3. **Tool Calling**

@@ -140,10 +137,10 @@ from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

 @tool
 def get_current_time() -> str:
-    """Get the current timestamp"""
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())

-response = model.bind_tools([get_current_time]).invoke("What time is it?")
+response = model.bind_tools([get_current_time]).invoke("What time is it now?")

 if has_tool_calling(response):
     name, args = parse_tool_calling(
@@ -167,40 +164,43 @@ import datetime
 @human_in_the_loop
 @tool
 def get_current_time() -> str:
-    """Get the current timestamp"""
+    """Get current timestamp"""
     return str(datetime.datetime.now().timestamp())
 ```

-**For more information about tool calling, please refer to**: [Add Human-in-the-Loop Support](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/human-in-the-loop.html), [Tool Call Handling](https://tbice123123.github.io/langchain-dev-utils-docs/en/tool-calling/tool.html)

 ### 4. **Agent Development**

-Includes the following capabilities:
+Includes the following features:

-- Multi-agent construction
-- Commonly used middleware components
+- Multi-agent construction
+- Common middleware components

 #### 4.1 Multi-Agent Construction

-Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To support this pattern, this library provides a pre-built utility function `wrap_agent_as_tool`, which encapsulates an agent instance into a tool that can be invoked by other agents.
+Wrapping agents as tools is a common implementation pattern in multi-agent systems, which is detailed in the official LangChain documentation. To this end, this library provides a pre-built function `wrap_agent_as_tool` to implement this pattern, which can wrap an agent instance into a tool that can be called by other agents.

-**Usage Example**:
+Usage example:

 ```python
 import datetime
 from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState

-
 @tool
 def get_current_time() -> str:
-    """Get the current time."""
+    """Get current time"""
     return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

+time_agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
+call_time_agent_tool = wrap_agent_as_tool(time_agent)

-agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-call_time_agent_tool = wrap_agent_as_tool(agent)
-response = call_time_agent_tool.invoke(
+agent = create_agent(
+    "vllm:qwen3-4b",
+    name="agent",
+    tools=[call_time_agent_tool],
+)
+response = agent.invoke(
     {"messages": [{"role": "user", "content": "What time is it now?"}]}
 )
 print(response)
@@ -208,10 +208,11 @@ print(response)

 #### 4.2 Middleware

-Provides several commonly used middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
+Provides some common middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.

-- `ToolCallRepairMiddleware` automatically repairs malformed tool calls found in the model's `invalid_tool_calls` output.
-- `PlanMiddleware` enables task planning capabilities for agents.
+`ToolCallRepairMiddleware` is used to fix the `invalid_tool_calls` content returned by large models.
+
+`PlanMiddleware` is used for agent planning.

 ```python
 from langchain_dev_utils.agents.middleware import (
@@ -219,33 +220,28 @@ from langchain_dev_utils.agents.middleware import (
     PlanMiddleware,
 )

 agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[
-        ToolCallRepairMiddleware(),
-        PlanMiddleware(use_read_plan_tool=False)
-    ]
+    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+        use_read_plan_tool=False
+    )]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan for visiting New York."}]})
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
 print(response)
 ```

-**For more details on agent development and a complete list of built-in middleware, please refer to**:
-[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
-[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
-

 ### 5. **State Graph Orchestration**

-Includes the following capabilities:
+Includes the following features:

 - Sequential graph orchestration
 - Parallel graph orchestration

 #### 5.1 Sequential Graph Orchestration

-Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:
+Using `create_sequential_pipeline`, you can orchestrate multiple subgraphs in sequence:

 ```python
 from langchain.agents import AgentState
@@ -260,25 +256,25 @@ register_model_provider(
     base_url="http://localhost:8000/v1",
 )

-# Build a sequential pipeline (all subgraphs executed in order)
+# Build sequential pipeline (all subgraphs execute in sequence)
 graph = create_sequential_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
+            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
+            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
+            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
             name="user_agent",
         ),
     ],
@@ -291,44 +287,42 @@ print(response)

 #### 5.2 Parallel Graph Orchestration

-Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:
+Using `create_parallel_pipeline`, you can orchestrate multiple subgraphs in parallel:

 ```python
 from langchain_dev_utils.pipeline import create_parallel_pipeline

-# Build a parallel pipeline (all subgraphs executed concurrently)
+# Build parallel pipeline (all subgraphs execute in parallel)
 graph = create_parallel_pipeline(
     sub_graphs=[
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_time],
-            system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
+            system_prompt="You are a time query assistant, you can only answer the current time. If this question is not related to time, please directly answer that you cannot answer",
             name="time_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_weather],
-            system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
+            system_prompt="You are a weather query assistant, you can only answer the current weather. If this question is not related to weather, please directly answer that you cannot answer",
             name="weather_agent",
         ),
         create_agent(
             model="vllm:qwen3-4b",
             tools=[get_current_user],
-            system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
+            system_prompt="You are a user query assistant, you can only answer the current user. If this question is not related to users, please directly answer that you cannot answer",
             name="user_agent",
         ),
     ],
     state_schema=AgentState,
 )
-
 response = graph.invoke({"messages": [HumanMessage("Hello")]})
 print(response)
 ```

-**For more information about state graph orchestration, please refer to**: [State Graph Orchestration](https://tbice123123.github.io/langchain-dev-utils-docs/en/graph-orchestration/pipeline.html)

 ## 💬 Join the Community

 - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
 - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
-- We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+- We welcome all forms of contributions — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together
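
Beyond the wording churn, the one code-level behavior change in these documentation hunks is in section 4.1: version 1.2.11 builds a dedicated time agent, wraps it with `wrap_agent_as_tool`, and hands the resulting tool to an outer agent, whereas 1.2.9 invoked the wrapped tool directly. Here is the new version restated as a single self-contained sketch; the only assumption is the origin of the `@tool` decorator, which the excerpts use without ever showing its import.

```python
# The v1.2.11 example from section 4.1, restated in one piece. Everything
# except the @tool import is taken from the + lines above.
import datetime

from langchain_core.tools import tool  # assumed import; not shown in the diff
from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool


@tool
def get_current_time() -> str:
    """Get current time"""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


# 1.2.11 builds a dedicated time agent and wraps it as a callable tool...
time_agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
call_time_agent_tool = wrap_agent_as_tool(time_agent)

# ...then hands that tool to an outer agent, which decides when to call it.
agent = create_agent(
    "vllm:qwen3-4b",
    name="agent",
    tools=[call_time_agent_tool],
)
response = agent.invoke(
    {"messages": [{"role": "user", "content": "What time is it now?"}]}
)
print(response)
```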