langchain-dev-utils 1.2.4__tar.gz → 1.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/PKG-INFO +85 -54
  2. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/README.md +84 -53
  3. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/README_cn.md +84 -54
  4. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/pyproject.toml +1 -1
  5. langchain_dev_utils-1.2.6/src/langchain_dev_utils/__init__.py +1 -0
  6. langchain_dev_utils-1.2.6/src/langchain_dev_utils/_utils.py +39 -0
  7. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/factory.py +2 -1
  8. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -18
  9. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +28 -14
  10. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/chat_models/base.py +68 -83
  11. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/chat_models/types.py +2 -1
  12. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/embeddings/base.py +7 -30
  13. langchain_dev_utils-1.2.6/tests/__init__.py +38 -0
  14. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_agent.py +0 -15
  15. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_chat_models.py +1 -12
  16. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_human_in_the_loop.py +1 -6
  17. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_load_embbeding.py +1 -19
  18. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_load_model.py +1 -23
  19. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_model_tool_emulator.py +0 -9
  20. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_plan_middleware.py +0 -5
  21. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_router_model.py +0 -15
  22. langchain_dev_utils-1.2.4/src/langchain_dev_utils/__init__.py +0 -1
  23. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/.gitignore +0 -0
  24. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/.python-version +0 -0
  25. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/.vscode/settings.json +0 -0
  26. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/LICENSE +0 -0
  27. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/__init__.py +0 -0
  28. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/file_system.py +0 -0
  29. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  30. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  31. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  32. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  33. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  34. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  35. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/plan.py +0 -0
  36. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/agents/wrap.py +0 -0
  37. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  38. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  39. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  40. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  41. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/message_convert/content.py +0 -0
  42. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/message_convert/format.py +0 -0
  43. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  44. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  45. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  46. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/pipeline/types.py +0 -0
  47. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/py.typed +0 -0
  48. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  49. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  50. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  51. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_messages.py +0 -0
  52. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_pipline.py +0 -0
  53. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_tool_calling.py +0 -0
  54. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/tests/test_wrap_agent.py +0 -0
  55. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.6}/uv.lock +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-dev-utils
3
- Version: 1.2.4
3
+ Version: 1.2.6
4
4
  Summary: A practical utility library for LangChain and LangGraph development
5
5
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
6
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -57,19 +57,23 @@ Mainly consists of the following two functions:
57
57
  - `register_model_provider`: Register a chat model provider
58
58
  - `load_chat_model`: Load a chat model
59
59
 
60
- `register_model_provider` parameter description:
60
+ **`register_model_provider` Parameters:**
61
61
 
62
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
63
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
64
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
65
- - `provider_profile`: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
66
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
62
+ | Parameter | Type | Required | Default | Description |
63
+ |-----------|------|----------|---------|-------------|
64
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
65
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
66
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
67
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
68
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
67
69
 
68
- `load_chat_model` parameter description:
70
+ **`load_chat_model` Parameters:**
69
71
 
70
- - `model`: Chat model name, type str
71
- - `model_provider`: Chat model provider name, type str, optional
72
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
72
+ | Parameter | Type | Required | Default | Description |
73
+ |-----------|------|----------|---------|-------------|
74
+ | `model` | str | Yes | - | Chat model name |
75
+ | `model_provider` | str | No | - | Chat model provider name |
76
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
73
77
 
74
78
  Example for integrating a qwen3-4b model deployed using `vllm`:
75
79
 
@@ -98,17 +102,21 @@ Mainly consists of the following two functions:
98
102
  - `register_embeddings_provider`: Register an embedding model provider
99
103
  - `load_embeddings`: Load an embedding model
100
104
 
101
- `register_embeddings_provider` parameter description:
105
+ **`register_embeddings_provider` Parameters:**
102
106
 
103
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
104
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
105
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
107
+ | Parameter | Type | Required | Default | Description |
108
+ |-----------|------|----------|---------|-------------|
109
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
110
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
111
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
106
112
 
107
- `load_embeddings` parameter description:
113
+ **`load_embeddings` Parameters:**
108
114
 
109
- - `model`: Embedding model name, type str
110
- - `provider`: Embedding model provider name, type str, optional
111
- - `kwargs`: Other additional parameters
115
+ | Parameter | Type | Required | Default | Description |
116
+ |-----------|------|----------|---------|-------------|
117
+ | `model` | str | Yes | - | Embedding model name |
118
+ | `provider` | str | No | - | Embedding model provider name |
119
+ | `kwargs` | dict | No | - | Other additional parameters |
112
120
 
113
121
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
114
122
 
@@ -142,11 +150,15 @@ Includes the following features:
142
150
 
143
151
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
144
152
 
145
- `merge_ai_message_chunk` parameter description:
153
+ **`merge_ai_message_chunk` Parameters:**
146
154
 
147
- - `chunks`: List of AIMessageChunk
155
+ | Parameter | Type | Required | Default | Description |
156
+ |-----------|------|----------|---------|-------------|
157
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
148
158
 
149
159
  ```python
160
+ from langchain_dev_utils.message_convert import merge_ai_message_chunk
161
+
150
162
  chunks = list(model.stream("Hello"))
151
163
  merged = merge_ai_message_chunk(chunks)
152
164
  ```
@@ -155,16 +167,16 @@ merged = merge_ai_message_chunk(chunks)
155
167
 
156
168
  For a list, you can use `format_sequence` to format it.
157
169
 
158
- `format_sequence` parameter description:
170
+ **`format_sequence` Parameters:**
159
171
 
160
- - `inputs`: A list containing any of the following types:
161
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
162
- - langchain_core.documents.Document
163
- - str
164
- - `separator`: String used to join the content, defaults to "-".
165
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
172
+ | Parameter | Type | Required | Default | Description |
173
+ |-----------|------|----------|---------|-------------|
174
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
175
+ | `separator` | str | No | "-" | String used to join the content |
176
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
166
177
 
167
178
  ```python
179
+ from langchain_dev_utils.message_convert import format_sequence
168
180
  text = format_sequence([
169
181
  "str1",
170
182
  "str2",
@@ -185,14 +197,18 @@ Includes the following features:
185
197
 
186
198
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
187
199
 
188
- `has_tool_calling` parameter description:
200
+ **`has_tool_calling` Parameters:**
189
201
 
190
- - `message`: AIMessage object
202
+ | Parameter | Type | Required | Default | Description |
203
+ |-----------|------|----------|---------|-------------|
204
+ | `message` | AIMessage | Yes | - | AIMessage object |
191
205
 
192
- `parse_tool_calling` parameter description:
206
+ **`parse_tool_calling` Parameters:**
193
207
 
194
- - `message`: AIMessage object
195
- - `first_tool_call_only`: Whether to only check the first tool call
208
+ | Parameter | Type | Required | Default | Description |
209
+ |-----------|------|----------|---------|-------------|
210
+ | `message` | AIMessage | Yes | - | AIMessage object |
211
+ | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
196
212
 
197
213
  ```python
198
214
  import datetime
@@ -221,7 +237,7 @@ if has_tool_calling(response):
221
237
  Both can accept a `handler` parameter for custom breakpoint return and response handling logic.
222
238
 
223
239
  ```python
224
- from langchain_dev_utils import human_in_the_loop
240
+ from langchain_dev_utils.tool_calling import human_in_the_loop
225
241
  from langchain_core.tools import tool
226
242
  import datetime
227
243
 
@@ -245,6 +261,13 @@ Includes the following features:
245
261
 
246
262
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
247
263
 
264
+ **`create_agent` Parameters:**
265
+
266
+ | Parameter | Type | Required | Default | Description |
267
+ |-----------|------|----------|---------|-------------|
268
+ | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
269
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
270
+
248
271
  Usage example:
249
272
 
250
273
  ```python
@@ -293,15 +316,19 @@ Includes the following features:
293
316
  Sequential graph orchestration:
294
317
  Uses `create_sequential_pipeline`, supported parameters:
295
318
 
296
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
297
- - `state_schema`: State Schema for the final generated graph
298
- - `graph_name`: Name of the final generated graph (optional)
299
- - `context_schema`: Context Schema for the final generated graph (optional)
300
- - `input_schema`: Input Schema for the final generated graph (optional)
301
- - `output_schema`: Output Schema for the final generated graph (optional)
302
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
303
- - `store`: LangGraph persistence Store (optional)
304
- - `cache`: LangGraph Cache (optional)
319
+ **`create_sequential_pipeline` Parameters:**
320
+
321
+ | Parameter | Type | Required | Default | Description |
322
+ |-----------|------|----------|---------|-------------|
323
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
324
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
325
+ | `graph_name` | str | No | - | Name of the final generated graph |
326
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
327
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
328
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
329
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
330
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
331
+ | `cache` | BaseCache | No | - | LangGraph Cache |
305
332
 
306
333
  ```python
307
334
  from langchain.agents import AgentState
@@ -350,16 +377,20 @@ print(response)
350
377
  Parallel graph orchestration:
351
378
  Uses `create_parallel_pipeline`, supported parameters:
352
379
 
353
- - `sub_graphs`: List of state graphs to combine
354
- - `state_schema`: State Schema for the final generated graph
355
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
356
- - `graph_name`: Name of the final generated graph (optional)
357
- - `context_schema`: Context Schema for the final generated graph (optional)
358
- - `input_schema`: Input Schema for the final generated graph (optional)
359
- - `output_schema`: Output Schema for the final generated graph (optional)
360
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
361
- - `store`: LangGraph persistence Store (optional)
362
- - `cache`: LangGraph Cache (optional)
380
+ **`create_parallel_pipeline` Parameters:**
381
+
382
+ | Parameter | Type | Required | Default | Description |
383
+ |-----------|------|----------|---------|-------------|
384
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
385
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
386
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
387
+ | `graph_name` | str | No | - | Name of the final generated graph |
388
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
389
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
390
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
391
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
392
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
393
+ | `cache` | BaseCache | No | - | LangGraph Cache |
363
394
 
364
395
  ```python
365
396
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -398,4 +429,4 @@ print(response)
398
429
 
399
430
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
400
431
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
401
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
432
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
@@ -41,19 +41,23 @@ Mainly consists of the following two functions:
41
41
  - `register_model_provider`: Register a chat model provider
42
42
  - `load_chat_model`: Load a chat model
43
43
 
44
- `register_model_provider` parameter description:
44
+ **`register_model_provider` Parameters:**
45
45
 
46
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
47
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
48
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
49
- - `provider_profile`: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
50
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
46
+ | Parameter | Type | Required | Default | Description |
47
+ |-----------|------|----------|---------|-------------|
48
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
49
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
50
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
51
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
52
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
51
53
 
52
- `load_chat_model` parameter description:
54
+ **`load_chat_model` Parameters:**
53
55
 
54
- - `model`: Chat model name, type str
55
- - `model_provider`: Chat model provider name, type str, optional
56
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
56
+ | Parameter | Type | Required | Default | Description |
57
+ |-----------|------|----------|---------|-------------|
58
+ | `model` | str | Yes | - | Chat model name |
59
+ | `model_provider` | str | No | - | Chat model provider name |
60
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
57
61
 
58
62
  Example for integrating a qwen3-4b model deployed using `vllm`:
59
63
 
@@ -82,17 +86,21 @@ Mainly consists of the following two functions:
82
86
  - `register_embeddings_provider`: Register an embedding model provider
83
87
  - `load_embeddings`: Load an embedding model
84
88
 
85
- `register_embeddings_provider` parameter description:
89
+ **`register_embeddings_provider` Parameters:**
86
90
 
87
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
88
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
89
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
91
+ | Parameter | Type | Required | Default | Description |
92
+ |-----------|------|----------|---------|-------------|
93
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
94
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
95
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
90
96
 
91
- `load_embeddings` parameter description:
97
+ **`load_embeddings` Parameters:**
92
98
 
93
- - `model`: Embedding model name, type str
94
- - `provider`: Embedding model provider name, type str, optional
95
- - `kwargs`: Other additional parameters
99
+ | Parameter | Type | Required | Default | Description |
100
+ |-----------|------|----------|---------|-------------|
101
+ | `model` | str | Yes | - | Embedding model name |
102
+ | `provider` | str | No | - | Embedding model provider name |
103
+ | `kwargs` | dict | No | - | Other additional parameters |
96
104
 
97
105
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
98
106
 
@@ -126,11 +134,15 @@ Includes the following features:
126
134
 
127
135
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
128
136
 
129
- `merge_ai_message_chunk` parameter description:
137
+ **`merge_ai_message_chunk` Parameters:**
130
138
 
131
- - `chunks`: List of AIMessageChunk
139
+ | Parameter | Type | Required | Default | Description |
140
+ |-----------|------|----------|---------|-------------|
141
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
132
142
 
133
143
  ```python
144
+ from langchain_dev_utils.message_convert import merge_ai_message_chunk
145
+
134
146
  chunks = list(model.stream("Hello"))
135
147
  merged = merge_ai_message_chunk(chunks)
136
148
  ```
@@ -139,16 +151,16 @@ merged = merge_ai_message_chunk(chunks)
139
151
 
140
152
  For a list, you can use `format_sequence` to format it.
141
153
 
142
- `format_sequence` parameter description:
154
+ **`format_sequence` Parameters:**
143
155
 
144
- - `inputs`: A list containing any of the following types:
145
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
146
- - langchain_core.documents.Document
147
- - str
148
- - `separator`: String used to join the content, defaults to "-".
149
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
156
+ | Parameter | Type | Required | Default | Description |
157
+ |-----------|------|----------|---------|-------------|
158
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
159
+ | `separator` | str | No | "-" | String used to join the content |
160
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
150
161
 
151
162
  ```python
163
+ from langchain_dev_utils.message_convert import format_sequence
152
164
  text = format_sequence([
153
165
  "str1",
154
166
  "str2",
@@ -169,14 +181,18 @@ Includes the following features:
169
181
 
170
182
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
171
183
 
172
- `has_tool_calling` parameter description:
184
+ **`has_tool_calling` Parameters:**
173
185
 
174
- - `message`: AIMessage object
186
+ | Parameter | Type | Required | Default | Description |
187
+ |-----------|------|----------|---------|-------------|
188
+ | `message` | AIMessage | Yes | - | AIMessage object |
175
189
 
176
- `parse_tool_calling` parameter description:
190
+ **`parse_tool_calling` Parameters:**
177
191
 
178
- - `message`: AIMessage object
179
- - `first_tool_call_only`: Whether to only check the first tool call
192
+ | Parameter | Type | Required | Default | Description |
193
+ |-----------|------|----------|---------|-------------|
194
+ | `message` | AIMessage | Yes | - | AIMessage object |
195
+ | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
180
196
 
181
197
  ```python
182
198
  import datetime
@@ -205,7 +221,7 @@ if has_tool_calling(response):
205
221
  Both can accept a `handler` parameter for custom breakpoint return and response handling logic.
206
222
 
207
223
  ```python
208
- from langchain_dev_utils import human_in_the_loop
224
+ from langchain_dev_utils.tool_calling import human_in_the_loop
209
225
  from langchain_core.tools import tool
210
226
  import datetime
211
227
 
@@ -229,6 +245,13 @@ Includes the following features:
229
245
 
230
246
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
231
247
 
248
+ **`create_agent` Parameters:**
249
+
250
+ | Parameter | Type | Required | Default | Description |
251
+ |-----------|------|----------|---------|-------------|
252
+ | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
253
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
254
+
232
255
  Usage example:
233
256
 
234
257
  ```python
@@ -277,15 +300,19 @@ Includes the following features:
277
300
  Sequential graph orchestration:
278
301
  Uses `create_sequential_pipeline`, supported parameters:
279
302
 
280
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
281
- - `state_schema`: State Schema for the final generated graph
282
- - `graph_name`: Name of the final generated graph (optional)
283
- - `context_schema`: Context Schema for the final generated graph (optional)
284
- - `input_schema`: Input Schema for the final generated graph (optional)
285
- - `output_schema`: Output Schema for the final generated graph (optional)
286
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
287
- - `store`: LangGraph persistence Store (optional)
288
- - `cache`: LangGraph Cache (optional)
303
+ **`create_sequential_pipeline` Parameters:**
304
+
305
+ | Parameter | Type | Required | Default | Description |
306
+ |-----------|------|----------|---------|-------------|
307
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
308
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
309
+ | `graph_name` | str | No | - | Name of the final generated graph |
310
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
311
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
312
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
313
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
314
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
315
+ | `cache` | BaseCache | No | - | LangGraph Cache |
289
316
 
290
317
  ```python
291
318
  from langchain.agents import AgentState
@@ -334,16 +361,20 @@ print(response)
334
361
  Parallel graph orchestration:
335
362
  Uses `create_parallel_pipeline`, supported parameters:
336
363
 
337
- - `sub_graphs`: List of state graphs to combine
338
- - `state_schema`: State Schema for the final generated graph
339
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
340
- - `graph_name`: Name of the final generated graph (optional)
341
- - `context_schema`: Context Schema for the final generated graph (optional)
342
- - `input_schema`: Input Schema for the final generated graph (optional)
343
- - `output_schema`: Output Schema for the final generated graph (optional)
344
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
345
- - `store`: LangGraph persistence Store (optional)
346
- - `cache`: LangGraph Cache (optional)
364
+ **`create_parallel_pipeline` Parameters:**
365
+
366
+ | Parameter | Type | Required | Default | Description |
367
+ |-----------|------|----------|---------|-------------|
368
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
369
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
370
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
371
+ | `graph_name` | str | No | - | Name of the final generated graph |
372
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
373
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
374
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
375
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
376
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
377
+ | `cache` | BaseCache | No | - | LangGraph Cache |
347
378
 
348
379
  ```python
349
380
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -382,4 +413,4 @@ print(response)
382
413
 
383
414
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
384
415
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
385
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
416
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!