langchain-dev-utils 1.2.5__tar.gz → 1.2.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/PKG-INFO +28 -120
  2. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/README.md +26 -119
  3. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/README_cn.md +13 -109
  4. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/pyproject.toml +8 -2
  5. langchain_dev_utils-1.2.7/src/langchain_dev_utils/__init__.py +1 -0
  6. langchain_dev_utils-1.2.7/src/langchain_dev_utils/_utils.py +42 -0
  7. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/__init__.py +0 -1
  8. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/factory.py +2 -10
  9. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/file_system.py +1 -1
  10. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/__init__.py +2 -0
  11. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
  12. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/model_router.py +37 -46
  13. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/plan.py +17 -18
  14. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/summarization.py +6 -4
  15. langchain_dev_utils-1.2.7/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  16. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
  17. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
  18. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/plan.py +1 -1
  19. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/agents/wrap.py +8 -20
  20. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +33 -17
  21. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/chat_models/base.py +38 -50
  22. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/chat_models/types.py +0 -1
  23. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/embeddings/base.py +40 -46
  24. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/message_convert/__init__.py +0 -1
  25. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/message_convert/content.py +8 -11
  26. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/message_convert/format.py +2 -2
  27. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/pipeline/parallel.py +10 -41
  28. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/pipeline/sequential.py +6 -21
  29. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
  30. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/tool_calling/utils.py +3 -3
  31. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/__init__.py +1 -3
  32. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_agent.py +1 -1
  33. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_human_in_the_loop.py +2 -3
  34. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_load_embbeding.py +1 -1
  35. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_model_tool_emulator.py +1 -1
  36. langchain_dev_utils-1.2.7/tests/test_tool_call_repair.py +154 -0
  37. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_wrap_agent.py +2 -1
  38. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/uv.lock +453 -1
  39. langchain_dev_utils-1.2.5/src/langchain_dev_utils/__init__.py +0 -1
  40. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/.gitignore +0 -0
  41. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/.python-version +0 -0
  42. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/.vscode/settings.json +0 -0
  43. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/LICENSE +0 -0
  44. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  45. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  46. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  47. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  48. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/pipeline/types.py +0 -0
  49. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/py.typed +0 -0
  50. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  51. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_chat_models.py +1 -1
  52. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_load_model.py +1 -1
  53. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_messages.py +1 -1
  54. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_pipline.py +0 -0
  55. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_plan_middleware.py +0 -0
  56. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_router_model.py +0 -0
  57. {langchain_dev_utils-1.2.5 → langchain_dev_utils-1.2.7}/tests/test_tool_calling.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-dev-utils
- Version: 1.2.5
+ Version: 1.2.7
  Summary: A practical utility library for LangChain and LangGraph development
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -11,6 +11,7 @@ Requires-Python: >=3.11
  Requires-Dist: langchain>=1.1.0
  Requires-Dist: langgraph>=1.0.0
  Provides-Extra: standard
+ Requires-Dist: json-repair>=0.53.1; extra == 'standard'
  Requires-Dist: langchain-openai; extra == 'standard'
  Description-Content-Type: text/markdown

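The new `json-repair` requirement under the `standard` extra lines up with the tool-call repair feature added in this release (see `tool_call_repair.py` in the file list). A minimal sketch of what the library does, assuming its documented `repair_json` helper:

```python
# A minimal sketch, assuming json_repair's documented repair_json() helper:
# it coerces malformed JSON -- e.g. the truncated arguments of an invalid
# tool call -- back into something parseable.
from json_repair import repair_json

broken = '{"city": "New York", "days": 3'  # missing closing brace
print(repair_json(broken))                 # {"city": "New York", "days": 3}
```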
@@ -57,24 +58,6 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model

- **`register_model_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
- | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
- | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
- | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling` etc.). |
- | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
-
- **`load_chat_model` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Chat model name |
- | `model_provider` | str | No | - | Chat model provider name |
- | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
-
  Example for integrating a qwen3-4b model deployed using `vllm`:

  ```python
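For reference, the parameter tables removed above translate into usage along these lines; a hedged sketch, assuming the helpers live in `langchain_dev_utils.chat_models` (per the file list) and with illustrative `model_profiles` values:

```python
# A hedged sketch: the import path and the profile fields are assumptions,
# not confirmed by this diff.
from langchain_dev_utils.chat_models import register_model_provider, load_chat_model

register_model_provider(
    provider_name="vllm",            # identifier used when loading models
    chat_model="openai-compatible",  # or a ChatModel instance
    base_url="http://localhost:8000/v1",
    model_profiles={
        "qwen3-4b": {"max_input_tokens": 32768, "tool_calling": True},  # illustrative
    },
)

model = load_chat_model("vllm:qwen3-4b", temperature=0.7)  # extra kwargs pass through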
@@ -102,22 +85,6 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model

- **`register_embeddings_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
- | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
- | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
-
- **`load_embeddings` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Embedding model name |
- | `provider` | str | No | - | Embedding model provider name |
- | `kwargs` | dict | No | - | Other additional parameters |
-
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:

  ```python
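Likewise for the embeddings tables removed above; a hedged sketch, assuming the `langchain_dev_utils.embeddings` import path and the `provider` keyword from the removed table:

```python
# A hedged sketch; note the loader takes `provider` here, not `model_provider`.
from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings

register_embeddings_provider(
    provider_name="vllm",
    embeddings_model="openai-compatible",  # or an Embeddings instance
    base_url="http://localhost:8000/v1",
)

embeddings = load_embeddings("qwen3-embedding-4b", provider="vllm")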
@@ -150,13 +117,9 @@ Includes the following features:

  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

- **`merge_ai_message_chunk` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
-
  ```python
+ from langchain_dev_utils.message_convert import merge_ai_message_chunk
+
  chunks = list(model.stream("Hello"))
  merged = merge_ai_message_chunk(chunks)
  ```
@@ -165,15 +128,8 @@ merged = merge_ai_message_chunk(chunks)

  For a list, you can use `format_sequence` to format it.

- **`format_sequence` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
- | `separator` | str | No | "-" | String used to join the content |
- | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
-
  ```python
+ from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
      "str1",
      "str2",
@@ -194,19 +150,6 @@ Includes the following features:

  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.

- **`has_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
-
- **`parse_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
- | `first_tool_call_only` | bool | No | False | Whether to only check the first tool call |
-
  ```python
  import datetime
  from langchain_core.tools import tool
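The example body is elided here; a hedged sketch of the two helpers, assuming they are exported from `langchain_dev_utils.tool_calling` and that `model` is a chat model loaded as in section 1 (the return shape of `parse_tool_calling` is an assumption):

```python
from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

response = model.invoke("What time is it?")  # `model` loaded as in section 1
if has_tool_calling(response):
    # first_tool_call_only=True parses just the first tool call (shape assumed)
    parsed = parse_tool_calling(response, first_tool_call_only=True)
    print(parsed)
```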
@@ -234,7 +177,7 @@ if has_tool_calling(response):
  Both can accept a `handler` parameter for custom breakpoint return and response handling logic.

  ```python
- from langchain_dev_utils import human_in_the_loop
+ from langchain_dev_utils.tool_calling import human_in_the_loop
  from langchain_core.tools import tool
  import datetime

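Version 1.2.7 moves `human_in_the_loop` from the package root into the `tool_calling` submodule. The rest of the example is elided; a hedged sketch of decorator-style usage (applying it as a decorator is an assumption, not confirmed by this diff):

```python
from langchain_dev_utils.tool_calling import human_in_the_loop
from langchain_core.tools import tool
import datetime

# Assumed decorator usage: pause for human review before the tool runs.
@human_in_the_loop
@tool
def get_current_time() -> str:
    """Return the current time."""
    return datetime.datetime.now().isoformat()
```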
@@ -256,14 +199,7 @@ Includes the following features:

  #### 4.1 Agent Factory Functions

- In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
-
- **`create_agent` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
- | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+ In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).

  Usage example:

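The usage example itself falls outside this hunk; a hedged reconstruction, assuming `create_agent` is importable from `langchain_dev_utils.agents` (per the file list):

```python
from langchain_dev_utils.agents import create_agent

# "vllm:qwen3-4b" resolves through load_chat_model, so the "vllm" provider
# must be registered first (see section 1).
agent = create_agent("vllm:qwen3-4b", tools=[], name="demo-agent")
response = agent.invoke({"messages": [{"role": "user", "content": "Hello"}]})
print(response)
```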
@@ -278,24 +214,26 @@ print(response)

  #### 4.2 Middleware

- Provides some commonly used middleware components. Below are examples of `SummarizationMiddleware` and `PlanMiddleware`.
+ Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.

- `SummarizationMiddleware` is used for agent summarization.
+ `ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.

  `PlanMiddleware` is used for agent planning.

  ```python
  from langchain_dev_utils.agents.middleware import (
-     SummarizationMiddleware,
+     ToolCallRepairMiddleware,
      PlanMiddleware,
  )

- agent=create_agent(
+ agent = create_agent(
      "vllm:qwen3-4b",
      name="plan-agent",
-     middleware=[PlanMiddleware(), SummarizationMiddleware(model="vllm:qwen3-4b")]
+     middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+         use_read_plan_tool=False
+     )]
  )
- response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]}))
+ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
  print(response)
  ```

@@ -303,29 +241,14 @@ print(response)

  ### 5. **State Graph Orchestration**

- Includes the following features:
+ Includes the following capabilities:

  - Sequential graph orchestration
  - Parallel graph orchestration

  #### 5.1 Sequential Graph Orchestration

- Sequential graph orchestration:
- Uses `create_sequential_pipeline`, supported parameters:
-
- **`create_sequential_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:

  ```python
  from langchain.agents import AgentState
@@ -340,25 +263,25 @@ register_model_provider(
      base_url="http://localhost:8000/v1",
  )

- # Build sequential pipeline (all sub-graphs execute sequentially)
+ # Build a sequential pipeline (all subgraphs executed in order)
  graph = create_sequential_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
@@ -371,51 +294,36 @@ print(response)

  #### 5.2 Parallel Graph Orchestration

- Parallel graph orchestration:
- Uses `create_parallel_pipeline`, supported parameters:
-
- **`create_parallel_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:

  ```python
  from langchain_dev_utils.pipeline import create_parallel_pipeline

- # Build parallel pipeline (all sub-graphs execute in parallel)
+ # Build a parallel pipeline (all subgraphs executed concurrently)
  graph = create_parallel_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
      state_schema=AgentState,
  )
+
  response = graph.invoke({"messages": [HumanMessage("Hello")]})
  print(response)
  ```
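The removed table documents a required `branches_fn` that returns `Send` objects, which the surviving example does not show; a hedged sketch of such a function (the dispatch targets are illustrative):

```python
from langgraph.types import Send

def branches_fn(state):
    # Fan the same state out to each subgraph by name; create_parallel_pipeline
    # would receive this function via its branches_fn parameter.
    return [
        Send("time_agent", state),
        Send("weather_agent", state),
        Send("user_agent", state),
    ]
```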
@@ -426,4 +334,4 @@ print(response)

  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
@@ -41,24 +41,6 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model

- **`register_model_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
- | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
- | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
- | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling` etc.). |
- | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
-
- **`load_chat_model` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Chat model name |
- | `model_provider` | str | No | - | Chat model provider name |
- | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
-
  Example for integrating a qwen3-4b model deployed using `vllm`:

  ```python
@@ -86,22 +68,6 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model

- **`register_embeddings_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
- | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
- | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
-
- **`load_embeddings` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Embedding model name |
- | `provider` | str | No | - | Embedding model provider name |
- | `kwargs` | dict | No | - | Other additional parameters |
-
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:

  ```python
@@ -134,13 +100,9 @@ Includes the following features:

  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

- **`merge_ai_message_chunk` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
-
  ```python
+ from langchain_dev_utils.message_convert import merge_ai_message_chunk
+
  chunks = list(model.stream("Hello"))
  merged = merge_ai_message_chunk(chunks)
  ```
@@ -149,15 +111,8 @@ merged = merge_ai_message_chunk(chunks)

  For a list, you can use `format_sequence` to format it.

- **`format_sequence` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
- | `separator` | str | No | "-" | String used to join the content |
- | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
-
  ```python
+ from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
      "str1",
      "str2",
@@ -178,19 +133,6 @@ Includes the following features:

  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.

- **`has_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
-
- **`parse_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
- | `first_tool_call_only` | bool | No | False | Whether to only check the first tool call |
-
  ```python
  import datetime
  from langchain_core.tools import tool
@@ -218,7 +160,7 @@ if has_tool_calling(response):
  Both can accept a `handler` parameter for custom breakpoint return and response handling logic.

  ```python
- from langchain_dev_utils import human_in_the_loop
+ from langchain_dev_utils.tool_calling import human_in_the_loop
  from langchain_core.tools import tool
  import datetime

@@ -240,14 +182,7 @@ Includes the following features:

  #### 4.1 Agent Factory Functions

- In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
-
- **`create_agent` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
- | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+ In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).

  Usage example:

@@ -262,24 +197,26 @@ print(response)

  #### 4.2 Middleware

- Provides some commonly used middleware components. Below are examples of `SummarizationMiddleware` and `PlanMiddleware`.
+ Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.

- `SummarizationMiddleware` is used for agent summarization.
+ `ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.

  `PlanMiddleware` is used for agent planning.

  ```python
  from langchain_dev_utils.agents.middleware import (
-     SummarizationMiddleware,
+     ToolCallRepairMiddleware,
      PlanMiddleware,
  )

- agent=create_agent(
+ agent = create_agent(
      "vllm:qwen3-4b",
      name="plan-agent",
-     middleware=[PlanMiddleware(), SummarizationMiddleware(model="vllm:qwen3-4b")]
+     middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+         use_read_plan_tool=False
+     )]
  )
- response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]}))
+ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
  print(response)
  ```

@@ -287,29 +224,14 @@ print(response)

  ### 5. **State Graph Orchestration**

- Includes the following features:
+ Includes the following capabilities:

  - Sequential graph orchestration
  - Parallel graph orchestration

  #### 5.1 Sequential Graph Orchestration

- Sequential graph orchestration:
- Uses `create_sequential_pipeline`, supported parameters:
-
- **`create_sequential_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:

  ```python
  from langchain.agents import AgentState
@@ -324,25 +246,25 @@ register_model_provider(
      base_url="http://localhost:8000/v1",
  )

- # Build sequential pipeline (all sub-graphs execute sequentially)
+ # Build a sequential pipeline (all subgraphs executed in order)
  graph = create_sequential_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
@@ -355,51 +277,36 @@ print(response)

  #### 5.2 Parallel Graph Orchestration

- Parallel graph orchestration:
- Uses `create_parallel_pipeline`, supported parameters:
-
- **`create_parallel_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:

  ```python
  from langchain_dev_utils.pipeline import create_parallel_pipeline

- # Build parallel pipeline (all sub-graphs execute in parallel)
+ # Build a parallel pipeline (all subgraphs executed concurrently)
  graph = create_parallel_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
      state_schema=AgentState,
  )
+
  response = graph.invoke({"messages": [HumanMessage("Hello")]})
  print(response)
  ```
@@ -410,4 +317,4 @@ print(response)

  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!