langchain-dev-utils 1.2.3__tar.gz → 1.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/.gitignore +1 -0
  2. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/PKG-INFO +81 -52
  3. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/README.md +80 -51
  4. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/README_cn.md +81 -51
  5. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/pyproject.toml +4 -2
  6. langchain_dev_utils-1.2.5/src/langchain_dev_utils/__init__.py +1 -0
  7. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/factory.py +2 -1
  8. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -18
  9. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +28 -14
  10. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/base.py +73 -37
  11. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/types.py +2 -1
  12. langchain_dev_utils-1.2.5/tests/__init__.py +38 -0
  13. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_agent.py +0 -15
  14. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_chat_models.py +1 -12
  15. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_human_in_the_loop.py +1 -6
  16. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_load_embbeding.py +1 -19
  17. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_load_model.py +21 -19
  18. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_model_tool_emulator.py +0 -9
  19. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_plan_middleware.py +0 -5
  20. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_router_model.py +0 -15
  21. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/uv.lock +32 -17
  22. langchain_dev_utils-1.2.3/src/langchain_dev_utils/__init__.py +0 -1
  23. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/.python-version +0 -0
  24. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/.vscode/settings.json +0 -0
  25. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/LICENSE +0 -0
  26. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/__init__.py +0 -0
  27. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/file_system.py +0 -0
  28. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  29. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  30. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  31. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  32. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  33. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  34. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/plan.py +0 -0
  35. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/wrap.py +0 -0
  36. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  37. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  38. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  39. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/embeddings/base.py +0 -0
  40. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  41. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/content.py +0 -0
  42. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/format.py +0 -0
  43. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  44. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  45. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  46. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/types.py +0 -0
  47. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/py.typed +0 -0
  48. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  49. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  50. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  51. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_messages.py +0 -0
  52. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_pipline.py +0 -0
  53. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_tool_calling.py +0 -0
  54. {langchain_dev_utils-1.2.3 → langchain_dev_utils-1.2.5}/tests/test_wrap_agent.py +0 -0
@@ -10,3 +10,4 @@ wheels/
10
10
  .venv
11
11
  .env
12
12
  .benchmarks
13
+ data/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-dev-utils
3
- Version: 1.2.3
3
+ Version: 1.2.5
4
4
  Summary: A practical utility library for LangChain and LangGraph development
5
5
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
6
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -57,18 +57,23 @@ Mainly consists of the following two functions:
57
57
  - `register_model_provider`: Register a chat model provider
58
58
  - `load_chat_model`: Load a chat model
59
59
 
60
- `register_model_provider` parameter description:
60
+ **`register_model_provider` Parameters:**
61
61
 
62
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
63
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
64
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
65
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
62
+ | Parameter | Type | Required | Default | Description |
63
+ |-----------|------|----------|---------|-------------|
64
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
65
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
66
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
67
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
68
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
66
69
 
67
- `load_chat_model` parameter description:
70
+ **`load_chat_model` Parameters:**
68
71
 
69
- - `model`: Chat model name, type str
70
- - `model_provider`: Chat model provider name, type str, optional
71
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
72
+ | Parameter | Type | Required | Default | Description |
73
+ |-----------|------|----------|---------|-------------|
74
+ | `model` | str | Yes | - | Chat model name |
75
+ | `model_provider` | str | No | - | Chat model provider name |
76
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
72
77
 
73
78
  Example for integrating a qwen3-4b model deployed using `vllm`:
74
79
 
@@ -97,17 +102,21 @@ Mainly consists of the following two functions:
97
102
  - `register_embeddings_provider`: Register an embedding model provider
98
103
  - `load_embeddings`: Load an embedding model
99
104
 
100
- `register_embeddings_provider` parameter description:
105
+ **`register_embeddings_provider` Parameters:**
101
106
 
102
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
103
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
104
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
107
+ | Parameter | Type | Required | Default | Description |
108
+ |-----------|------|----------|---------|-------------|
109
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
110
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
111
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
105
112
 
106
- `load_embeddings` parameter description:
113
+ **`load_embeddings` Parameters:**
107
114
 
108
- - `model`: Embedding model name, type str
109
- - `provider`: Embedding model provider name, type str, optional
110
- - `kwargs`: Other additional parameters
115
+ | Parameter | Type | Required | Default | Description |
116
+ |-----------|------|----------|---------|-------------|
117
+ | `model` | str | Yes | - | Embedding model name |
118
+ | `provider` | str | No | - | Embedding model provider name |
119
+ | `kwargs` | dict | No | - | Other additional parameters |
111
120
 
112
121
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
113
122
 
@@ -141,9 +150,11 @@ Includes the following features:
141
150
 
142
151
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
143
152
 
144
- `merge_ai_message_chunk` parameter description:
153
+ **`merge_ai_message_chunk` Parameters:**
145
154
 
146
- - `chunks`: List of AIMessageChunk
155
+ | Parameter | Type | Required | Default | Description |
156
+ |-----------|------|----------|---------|-------------|
157
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
147
158
 
148
159
  ```python
149
160
  chunks = list(model.stream("Hello"))
@@ -154,14 +165,13 @@ merged = merge_ai_message_chunk(chunks)
154
165
 
155
166
  For a list, you can use `format_sequence` to format it.
156
167
 
157
- `format_sequence` parameter description:
168
+ **`format_sequence` Parameters:**
158
169
 
159
- - `inputs`: A list containing any of the following types:
160
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
161
- - langchain_core.documents.Document
162
- - str
163
- - `separator`: String used to join the content, defaults to "-".
164
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
170
+ | Parameter | Type | Required | Default | Description |
171
+ |-----------|------|----------|---------|-------------|
172
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
173
+ | `separator` | str | No | "-" | String used to join the content |
174
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
165
175
 
166
176
  ```python
167
177
  text = format_sequence([
@@ -184,14 +194,18 @@ Includes the following features:
184
194
 
185
195
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
186
196
 
187
- `has_tool_calling` parameter description:
197
+ **`has_tool_calling` Parameters:**
188
198
 
189
- - `message`: AIMessage object
199
+ | Parameter | Type | Required | Default | Description |
200
+ |-----------|------|----------|---------|-------------|
201
+ | `message` | AIMessage | Yes | - | AIMessage object |
190
202
 
191
- `parse_tool_calling` parameter description:
203
+ **`parse_tool_calling` Parameters:**
192
204
 
193
- - `message`: AIMessage object
194
- - `first_tool_call_only`: Whether to only check the first tool call
205
+ | Parameter | Type | Required | Default | Description |
206
+ |-----------|------|----------|---------|-------------|
207
+ | `message` | AIMessage | Yes | - | AIMessage object |
208
+ | `first_tool_call_only` | bool | No | False | Whether to parse only the first tool call |
195
209
 
196
210
  ```python
197
211
  import datetime
@@ -244,6 +258,13 @@ Includes the following features:
244
258
 
245
259
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
246
260
 
261
+ **`create_agent` Parameters:**
262
+
263
+ | Parameter | Type | Required | Default | Description |
264
+ |-----------|------|----------|---------|-------------|
265
+ | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
266
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
267
+
247
268
  Usage example:
248
269
 
249
270
  ```python
@@ -292,15 +313,19 @@ Includes the following features:
292
313
  Sequential graph orchestration:
293
314
  Uses `create_sequential_pipeline`, supported parameters:
294
315
 
295
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
296
- - `state_schema`: State Schema for the final generated graph
297
- - `graph_name`: Name of the final generated graph (optional)
298
- - `context_schema`: Context Schema for the final generated graph (optional)
299
- - `input_schema`: Input Schema for the final generated graph (optional)
300
- - `output_schema`: Output Schema for the final generated graph (optional)
301
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
302
- - `store`: LangGraph persistence Store (optional)
303
- - `cache`: LangGraph Cache (optional)
316
+ **`create_sequential_pipeline` Parameters:**
317
+
318
+ | Parameter | Type | Required | Default | Description |
319
+ |-----------|------|----------|---------|-------------|
320
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
321
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
322
+ | `graph_name` | str | No | - | Name of the final generated graph |
323
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
324
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
325
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
326
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
327
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
328
+ | `cache` | BaseCache | No | - | LangGraph Cache |
304
329
 
305
330
  ```python
306
331
  from langchain.agents import AgentState
@@ -349,16 +374,20 @@ print(response)
349
374
  Parallel graph orchestration:
350
375
  Uses `create_parallel_pipeline`, supported parameters:
351
376
 
352
- - `sub_graphs`: List of state graphs to combine
353
- - `state_schema`: State Schema for the final generated graph
354
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
355
- - `graph_name`: Name of the final generated graph (optional)
356
- - `context_schema`: Context Schema for the final generated graph (optional)
357
- - `input_schema`: Input Schema for the final generated graph (optional)
358
- - `output_schema`: Output Schema for the final generated graph (optional)
359
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
360
- - `store`: LangGraph persistence Store (optional)
361
- - `cache`: LangGraph Cache (optional)
377
+ **`create_parallel_pipeline` Parameters:**
378
+
379
+ | Parameter | Type | Required | Default | Description |
380
+ |-----------|------|----------|---------|-------------|
381
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
382
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
383
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
384
+ | `graph_name` | str | No | - | Name of the final generated graph |
385
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
386
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
387
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
388
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
389
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
390
+ | `cache` | BaseCache | No | - | LangGraph Cache |
362
391
 
363
392
  ```python
364
393
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -397,4 +426,4 @@ print(response)
397
426
 
398
427
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
399
428
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
400
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
429
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
@@ -41,18 +41,23 @@ Mainly consists of the following two functions:
41
41
  - `register_model_provider`: Register a chat model provider
42
42
  - `load_chat_model`: Load a chat model
43
43
 
44
- `register_model_provider` parameter description:
44
+ **`register_model_provider` Parameters:**
45
45
 
46
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
47
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
48
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
49
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
46
+ | Parameter | Type | Required | Default | Description |
47
+ |-----------|------|----------|---------|-------------|
48
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
49
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
50
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
51
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
52
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
50
53
 
51
- `load_chat_model` parameter description:
54
+ **`load_chat_model` Parameters:**
52
55
 
53
- - `model`: Chat model name, type str
54
- - `model_provider`: Chat model provider name, type str, optional
55
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
56
+ | Parameter | Type | Required | Default | Description |
57
+ |-----------|------|----------|---------|-------------|
58
+ | `model` | str | Yes | - | Chat model name |
59
+ | `model_provider` | str | No | - | Chat model provider name |
60
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
56
61
 
57
62
  Example for integrating a qwen3-4b model deployed using `vllm`:
58
63
 
@@ -81,17 +86,21 @@ Mainly consists of the following two functions:
81
86
  - `register_embeddings_provider`: Register an embedding model provider
82
87
  - `load_embeddings`: Load an embedding model
83
88
 
84
- `register_embeddings_provider` parameter description:
89
+ **`register_embeddings_provider` Parameters:**
85
90
 
86
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
87
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
88
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
91
+ | Parameter | Type | Required | Default | Description |
92
+ |-----------|------|----------|---------|-------------|
93
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
94
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
95
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
89
96
 
90
- `load_embeddings` parameter description:
97
+ **`load_embeddings` Parameters:**
91
98
 
92
- - `model`: Embedding model name, type str
93
- - `provider`: Embedding model provider name, type str, optional
94
- - `kwargs`: Other additional parameters
99
+ | Parameter | Type | Required | Default | Description |
100
+ |-----------|------|----------|---------|-------------|
101
+ | `model` | str | Yes | - | Embedding model name |
102
+ | `provider` | str | No | - | Embedding model provider name |
103
+ | `kwargs` | dict | No | - | Other additional parameters |
95
104
 
96
105
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
97
106
 
@@ -125,9 +134,11 @@ Includes the following features:
125
134
 
126
135
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
127
136
 
128
- `merge_ai_message_chunk` parameter description:
137
+ **`merge_ai_message_chunk` Parameters:**
129
138
 
130
- - `chunks`: List of AIMessageChunk
139
+ | Parameter | Type | Required | Default | Description |
140
+ |-----------|------|----------|---------|-------------|
141
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
131
142
 
132
143
  ```python
133
144
  chunks = list(model.stream("Hello"))
@@ -138,14 +149,13 @@ merged = merge_ai_message_chunk(chunks)
138
149
 
139
150
  For a list, you can use `format_sequence` to format it.
140
151
 
141
- `format_sequence` parameter description:
152
+ **`format_sequence` Parameters:**
142
153
 
143
- - `inputs`: A list containing any of the following types:
144
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
145
- - langchain_core.documents.Document
146
- - str
147
- - `separator`: String used to join the content, defaults to "-".
148
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
154
+ | Parameter | Type | Required | Default | Description |
155
+ |-----------|------|----------|---------|-------------|
156
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
157
+ | `separator` | str | No | "-" | String used to join the content |
158
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
149
159
 
150
160
  ```python
151
161
  text = format_sequence([
@@ -168,14 +178,18 @@ Includes the following features:
168
178
 
169
179
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
170
180
 
171
- `has_tool_calling` parameter description:
181
+ **`has_tool_calling` Parameters:**
172
182
 
173
- - `message`: AIMessage object
183
+ | Parameter | Type | Required | Default | Description |
184
+ |-----------|------|----------|---------|-------------|
185
+ | `message` | AIMessage | Yes | - | AIMessage object |
174
186
 
175
- `parse_tool_calling` parameter description:
187
+ **`parse_tool_calling` Parameters:**
176
188
 
177
- - `message`: AIMessage object
178
- - `first_tool_call_only`: Whether to only check the first tool call
189
+ | Parameter | Type | Required | Default | Description |
190
+ |-----------|------|----------|---------|-------------|
191
+ | `message` | AIMessage | Yes | - | AIMessage object |
192
+ | `first_tool_call_only` | bool | No | False | Whether to parse only the first tool call |
179
193
 
180
194
  ```python
181
195
  import datetime
@@ -228,6 +242,13 @@ Includes the following features:
228
242
 
229
243
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
230
244
 
245
+ **`create_agent` Parameters:**
246
+
247
+ | Parameter | Type | Required | Default | Description |
248
+ |-----------|------|----------|---------|-------------|
249
+ | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
250
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
251
+
231
252
  Usage example:
232
253
 
233
254
  ```python
@@ -276,15 +297,19 @@ Includes the following features:
276
297
  Sequential graph orchestration:
277
298
  Uses `create_sequential_pipeline`, supported parameters:
278
299
 
279
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
280
- - `state_schema`: State Schema for the final generated graph
281
- - `graph_name`: Name of the final generated graph (optional)
282
- - `context_schema`: Context Schema for the final generated graph (optional)
283
- - `input_schema`: Input Schema for the final generated graph (optional)
284
- - `output_schema`: Output Schema for the final generated graph (optional)
285
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
286
- - `store`: LangGraph persistence Store (optional)
287
- - `cache`: LangGraph Cache (optional)
300
+ **`create_sequential_pipeline` Parameters:**
301
+
302
+ | Parameter | Type | Required | Default | Description |
303
+ |-----------|------|----------|---------|-------------|
304
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
305
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
306
+ | `graph_name` | str | No | - | Name of the final generated graph |
307
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
308
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
309
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
310
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
311
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
312
+ | `cache` | BaseCache | No | - | LangGraph Cache |
288
313
 
289
314
  ```python
290
315
  from langchain.agents import AgentState
@@ -333,16 +358,20 @@ print(response)
333
358
  Parallel graph orchestration:
334
359
  Uses `create_parallel_pipeline`, supported parameters:
335
360
 
336
- - `sub_graphs`: List of state graphs to combine
337
- - `state_schema`: State Schema for the final generated graph
338
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
339
- - `graph_name`: Name of the final generated graph (optional)
340
- - `context_schema`: Context Schema for the final generated graph (optional)
341
- - `input_schema`: Input Schema for the final generated graph (optional)
342
- - `output_schema`: Output Schema for the final generated graph (optional)
343
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
344
- - `store`: LangGraph persistence Store (optional)
345
- - `cache`: LangGraph Cache (optional)
361
+ **`create_parallel_pipeline` Parameters:**
362
+
363
+ | Parameter | Type | Required | Default | Description |
364
+ |-----------|------|----------|---------|-------------|
365
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
366
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
367
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
368
+ | `graph_name` | str | No | - | Name of the final generated graph |
369
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
370
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
371
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
372
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
373
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
374
+ | `cache` | BaseCache | No | - | LangGraph Cache |
346
375
 
347
376
  ```python
348
377
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -381,4 +410,4 @@ print(response)
381
410
 
382
411
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
383
412
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
384
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
413
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!