langchain-dev-utils 1.2.6__tar.gz → 1.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/PKG-INFO +24 -119
  2. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/README.md +22 -118
  3. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/README_cn.md +10 -108
  4. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/pyproject.toml +8 -2
  5. langchain_dev_utils-1.2.8/src/langchain_dev_utils/__init__.py +1 -0
  6. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/_utils.py +9 -5
  7. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/__init__.py +0 -1
  8. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/factory.py +2 -10
  9. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/file_system.py +1 -1
  10. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/__init__.py +2 -0
  11. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
  12. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/model_router.py +37 -46
  13. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/plan.py +17 -18
  14. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/summarization.py +6 -4
  15. langchain_dev_utils-1.2.8/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  16. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
  17. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
  18. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/plan.py +1 -1
  19. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/wrap.py +8 -20
  20. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +105 -59
  21. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/base.py +30 -15
  22. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/types.py +6 -3
  23. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/embeddings/base.py +35 -18
  24. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/__init__.py +0 -1
  25. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/content.py +8 -11
  26. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/format.py +2 -2
  27. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/parallel.py +10 -41
  28. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/sequential.py +6 -21
  29. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
  30. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/utils.py +3 -3
  31. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/__init__.py +1 -3
  32. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_agent.py +1 -1
  33. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_human_in_the_loop.py +2 -3
  34. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_load_embbeding.py +1 -1
  35. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_model_tool_emulator.py +1 -1
  36. langchain_dev_utils-1.2.8/tests/test_tool_call_repair.py +154 -0
  37. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_wrap_agent.py +2 -1
  38. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/uv.lock +453 -1
  39. langchain_dev_utils-1.2.6/src/langchain_dev_utils/__init__.py +0 -1
  40. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/.gitignore +0 -0
  41. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/.python-version +0 -0
  42. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/.vscode/settings.json +0 -0
  43. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/LICENSE +0 -0
  44. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  45. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  46. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  47. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  48. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/types.py +0 -0
  49. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/py.typed +0 -0
  50. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  51. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_chat_models.py +1 -1
  52. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_load_model.py +1 -1
  53. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_messages.py +1 -1
  54. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_pipline.py +0 -0
  55. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_plan_middleware.py +0 -0
  56. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_router_model.py +0 -0
  57. {langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/tests/test_tool_calling.py +0 -0

{langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-dev-utils
- Version: 1.2.6
+ Version: 1.2.8
  Summary: A practical utility library for LangChain and LangGraph development
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -11,6 +11,7 @@ Requires-Python: >=3.11
  Requires-Dist: langchain>=1.1.0
  Requires-Dist: langgraph>=1.0.0
  Provides-Extra: standard
+ Requires-Dist: json-repair>=0.53.1; extra == 'standard'
  Requires-Dist: langchain-openai; extra == 'standard'
  Description-Content-Type: text/markdown

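The `json-repair` dependency added here backs the new tool-call repair middleware (`tool_call_repair.py` in the file list above). As a quick, hedged illustration of what that library does, independent of this package's own code:

```python
# Minimal sketch of the json-repair library that this release adds as an
# optional dependency; this is NOT code from langchain-dev-utils itself.
from json_repair import repair_json

# Malformed tool-call arguments of the kind an LLM might emit: single quotes,
# a trailing comma, and a missing closing brace.
broken_args = "{'city': 'New York', 'days': 3,"

print(repair_json(broken_args))  # expected: {"city": "New York", "days": 3}
```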
@@ -57,24 +58,6 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model

- **`register_model_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
- | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
- | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
- | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling` etc.). |
- | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
-
- **`load_chat_model` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Chat model name |
- | `model_provider` | str | No | - | Chat model provider name |
- | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
-
  Example for integrating a qwen3-4b model deployed using `vllm`:

  ```python
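Since the parameter tables above were removed in 1.2.8, a hedged sketch of the two calls they documented may help; the import path, endpoint, and profile values below are illustrative assumptions, and `compatibility_options` (feature flags for openai-compatible providers) is omitted because its keys are not shown in this diff:

```python
# Hedged sketch reconstructed from the deleted parameter tables; the import
# path, base_url, and model_profiles values are assumptions for illustration.
from langchain_dev_utils import load_chat_model, register_model_provider

register_model_provider(
    provider_name="vllm",            # identifier used when loading models later
    chat_model="openai-compatible",  # currently the only supported string value
    base_url="http://localhost:8000/v1",
    # Per-model capabilities; the matching entry is copied onto model.profile.
    model_profiles={"qwen3-4b": {"max_input_tokens": 32768, "tool_calling": True}},
)

model = load_chat_model("qwen3-4b", model_provider="vllm", temperature=0.7)
```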
@@ -102,22 +85,6 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model

- **`register_embeddings_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
- | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
- | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
-
- **`load_embeddings` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Embedding model name |
- | `provider` | str | No | - | Embedding model provider name |
- | `kwargs` | dict | No | - | Other additional parameters |
-
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:

  ```python
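Likewise, a hedged sketch of the embedding calls documented by the tables deleted above (the import path and endpoint are illustrative assumptions):

```python
# Hedged sketch reconstructed from the deleted parameter tables; the import
# path and base_url are assumptions for illustration.
from langchain_dev_utils import load_embeddings, register_embeddings_provider

register_embeddings_provider(
    provider_name="vllm",                  # identifier used when loading later
    embeddings_model="openai-compatible",  # currently the only supported string value
    base_url="http://localhost:8000/v1",
)

embeddings = load_embeddings("qwen3-embedding-4b", provider="vllm")
vector = embeddings.embed_query("hello world")
```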
@@ -150,12 +117,6 @@ Includes the following features:

  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

- **`merge_ai_message_chunk` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
-
  ```python
  from langchain_dev_utils.message_convert import merge_ai_message_chunk

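The diff truncates the example here; a hedged, complete version of the pattern looks like this (the `load_chat_model` import path and the registered "vllm" provider are assumptions carried over from section 1):

```python
# Hedged sketch: collect AIMessageChunk objects from a streamed response and
# merge them into a single AIMessage. Assumes a "vllm" provider was registered
# as in section 1; the load_chat_model import path is an assumption.
from langchain_dev_utils import load_chat_model
from langchain_dev_utils.message_convert import merge_ai_message_chunk

model = load_chat_model("qwen3-4b", model_provider="vllm")
chunks = list(model.stream("Tell me a joke"))  # each item is an AIMessageChunk
merged = merge_ai_message_chunk(chunks)
print(merged.content)
```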
@@ -167,14 +128,6 @@ merged = merge_ai_message_chunk(chunks)

  For a list, you can use `format_sequence` to format it.

- **`format_sequence` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
- | `separator` | str | No | "-" | String used to join the content |
- | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
-
  ```python
  from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
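Again the diff cuts the call off mid-expression; here is a complete, hedged example using the defaults documented in the deleted table:

```python
# Hedged sketch based on the deleted parameter table: inputs may mix messages,
# Documents, and plain strings; separator defaults to "-", with_num to False.
from langchain_core.documents import Document
from langchain_core.messages import AIMessage
from langchain_dev_utils.message_convert import format_sequence

text = format_sequence(
    [AIMessage("Hello"), Document(page_content="World"), "plain string"],
    separator="\n",
    with_num=True,  # numeric prefix per item, e.g. "1. Hello"
)
print(text)
```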
@@ -197,19 +150,6 @@ Includes the following features:

  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.

- **`has_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
-
- **`parse_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
- | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
-
  ```python
  import datetime
  from langchain_core.tools import tool
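For reference, a hedged sketch of the two helpers documented by the deleted tables (the `langchain_dev_utils.tool_calling` import path and the exact return shape of `parse_tool_calling` are assumptions):

```python
# Hedged sketch: build an AIMessage carrying a tool call by hand, then check
# and parse it. Import path and return shape are assumptions based on the
# deleted parameter tables.
from langchain_core.messages import AIMessage
from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

msg = AIMessage(
    content="",
    tool_calls=[{"name": "get_current_time", "args": {"timezone": "UTC"}, "id": "call_1"}],
)

if has_tool_calling(msg):
    # first_tool_call_only=True restricts parsing to the first tool call
    parsed = parse_tool_calling(msg, first_tool_call_only=True)
    print(parsed)
```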
@@ -259,14 +199,7 @@ Includes the following features:

  #### 4.1 Agent Factory Functions

- In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
-
- **`create_agent` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
- | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+ In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).

  Usage example:

@@ -281,24 +214,26 @@ print(response)

  #### 4.2 Middleware

- Provides some commonly used middleware components. Below are examples of `SummarizationMiddleware` and `PlanMiddleware`.
+ Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.

- `SummarizationMiddleware` is used for agent summarization.
+ `ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.

  `PlanMiddleware` is used for agent planning.

  ```python
  from langchain_dev_utils.agents.middleware import (
-     SummarizationMiddleware,
+     ToolCallRepairMiddleware,
      PlanMiddleware,
  )

- agent=create_agent(
+ agent = create_agent(
      "vllm:qwen3-4b",
      name="plan-agent",
-     middleware=[PlanMiddleware(), SummarizationMiddleware(model="vllm:qwen3-4b")]
+     middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+         use_read_plan_tool=False
+     )]
  )
- response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]}))
+ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
  print(response)
  ```

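For intuition about what the new middleware does: an invalid tool call is usually one whose argument string failed to parse as JSON, so repair mostly means fixing that string. A rough, hedged sketch of the idea follows; it is NOT the middleware's actual implementation (that lives in `src/langchain_dev_utils/agents/middleware/tool_call_repair.py`):

```python
# Rough illustration of tool-call repair, NOT the package's actual code.
import json

from json_repair import repair_json
from langchain_core.messages import AIMessage


def repair_invalid_tool_calls(message: AIMessage) -> AIMessage:
    """Move repairable entries from invalid_tool_calls into tool_calls."""
    repaired, still_invalid = list(message.tool_calls), []
    for bad in message.invalid_tool_calls:
        try:
            args = json.loads(repair_json(bad["args"] or "{}"))
            repaired.append({"name": bad["name"], "args": args, "id": bad["id"]})
        except ValueError:
            still_invalid.append(bad)  # leave truly hopeless entries untouched
    return message.model_copy(
        update={"tool_calls": repaired, "invalid_tool_calls": still_invalid}
    )
```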
@@ -306,29 +241,14 @@ print(response)

  ### 5. **State Graph Orchestration**

- Includes the following features:
+ Includes the following capabilities:

  - Sequential graph orchestration
  - Parallel graph orchestration

  #### 5.1 Sequential Graph Orchestration

- Sequential graph orchestration:
- Uses `create_sequential_pipeline`, supported parameters:
-
- **`create_sequential_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:

  ```python
  from langchain.agents import AgentState
@@ -343,25 +263,25 @@ register_model_provider(
      base_url="http://localhost:8000/v1",
  )

- # Build sequential pipeline (all sub-graphs execute sequentially)
+ # Build a sequential pipeline (all subgraphs executed in order)
  graph = create_sequential_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
@@ -374,51 +294,36 @@ print(response)

  #### 5.2 Parallel Graph Orchestration

- Parallel graph orchestration:
- Uses `create_parallel_pipeline`, supported parameters:
-
- **`create_parallel_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:

  ```python
  from langchain_dev_utils.pipeline import create_parallel_pipeline

- # Build parallel pipeline (all sub-graphs execute in parallel)
+ # Build a parallel pipeline (all subgraphs executed concurrently)
  graph = create_parallel_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
      state_schema=AgentState,
  )
+
  response = graph.invoke({"messages": [HumanMessage("Hello")]})
  print(response)
  ```
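Note that the deleted table documented `branches_fn` as required, while the new example omits it. Here is a hedged sketch of how that hook might be wired, with agents shaped like the ones in the example above (the subgraph names, state access, and `create_agent` import path are illustrative assumptions):

```python
# Hedged sketch of the branches_fn hook from the deleted parameter table: it
# returns langgraph Send objects that dispatch state to chosen subgraphs.
from langchain.agents import AgentState
from langgraph.types import Send

from langchain_dev_utils.agents import create_agent  # import path assumed
from langchain_dev_utils.pipeline import create_parallel_pipeline

# Agents as in the example above, bound to names (tools omitted for brevity).
time_agent = create_agent(model="vllm:qwen3-4b", name="time_agent")
weather_agent = create_agent(model="vllm:qwen3-4b", name="weather_agent")


def route_to_agents(state: AgentState) -> list[Send]:
    # Fan the incoming messages out to both registered subgraphs.
    return [
        Send("time_agent", {"messages": state["messages"]}),
        Send("weather_agent", {"messages": state["messages"]}),
    ]


graph = create_parallel_pipeline(
    sub_graphs=[time_agent, weather_agent],
    state_schema=AgentState,
    branches_fn=route_to_agents,
)
```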
@@ -429,4 +334,4 @@ print(response)

  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!

{langchain_dev_utils-1.2.6 → langchain_dev_utils-1.2.8}/README.md
@@ -41,24 +41,6 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model

- **`register_model_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
- | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
- | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
- | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling` etc.). |
- | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
-
- **`load_chat_model` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Chat model name |
- | `model_provider` | str | No | - | Chat model provider name |
- | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
-
  Example for integrating a qwen3-4b model deployed using `vllm`:

  ```python
@@ -86,22 +68,6 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model

- **`register_embeddings_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
- | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
- | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
-
- **`load_embeddings` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Embedding model name |
- | `provider` | str | No | - | Embedding model provider name |
- | `kwargs` | dict | No | - | Other additional parameters |
-
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:

  ```python
@@ -134,12 +100,6 @@ Includes the following features:

  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

- **`merge_ai_message_chunk` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
-
  ```python
  from langchain_dev_utils.message_convert import merge_ai_message_chunk

@@ -151,14 +111,6 @@ merged = merge_ai_message_chunk(chunks)

  For a list, you can use `format_sequence` to format it.

- **`format_sequence` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
- | `separator` | str | No | "-" | String used to join the content |
- | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
-
  ```python
  from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
@@ -181,19 +133,6 @@ Includes the following features:

  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.

- **`has_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
-
- **`parse_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
- | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
-
  ```python
  import datetime
  from langchain_core.tools import tool
@@ -243,14 +182,7 @@ Includes the following features:

  #### 4.1 Agent Factory Functions

- In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
-
- **`create_agent` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
- | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+ In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).

  Usage example:

@@ -265,24 +197,26 @@ print(response)

  #### 4.2 Middleware

- Provides some commonly used middleware components. Below are examples of `SummarizationMiddleware` and `PlanMiddleware`.
+ Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.

- `SummarizationMiddleware` is used for agent summarization.
+ `ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.

  `PlanMiddleware` is used for agent planning.

  ```python
  from langchain_dev_utils.agents.middleware import (
-     SummarizationMiddleware,
+     ToolCallRepairMiddleware,
      PlanMiddleware,
  )

- agent=create_agent(
+ agent = create_agent(
      "vllm:qwen3-4b",
      name="plan-agent",
-     middleware=[PlanMiddleware(), SummarizationMiddleware(model="vllm:qwen3-4b")]
+     middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+         use_read_plan_tool=False
+     )]
  )
- response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]}))
+ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
  print(response)
  ```

@@ -290,29 +224,14 @@ print(response)

  ### 5. **State Graph Orchestration**

- Includes the following features:
+ Includes the following capabilities:

  - Sequential graph orchestration
  - Parallel graph orchestration

  #### 5.1 Sequential Graph Orchestration

- Sequential graph orchestration:
- Uses `create_sequential_pipeline`, supported parameters:
-
- **`create_sequential_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:

  ```python
  from langchain.agents import AgentState
@@ -327,25 +246,25 @@ register_model_provider(
      base_url="http://localhost:8000/v1",
  )

- # Build sequential pipeline (all sub-graphs execute sequentially)
+ # Build a sequential pipeline (all subgraphs executed in order)
  graph = create_sequential_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
@@ -358,51 +277,36 @@ print(response)

  #### 5.2 Parallel Graph Orchestration

- Parallel graph orchestration:
- Uses `create_parallel_pipeline`, supported parameters:
-
- **`create_parallel_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:

  ```python
  from langchain_dev_utils.pipeline import create_parallel_pipeline

- # Build parallel pipeline (all sub-graphs execute in parallel)
+ # Build a parallel pipeline (all subgraphs executed concurrently)
  graph = create_parallel_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
      state_schema=AgentState,
  )
+
  response = graph.invoke({"messages": [HumanMessage("Hello")]})
  print(response)
  ```
@@ -413,4 +317,4 @@ print(response)

  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!