langchain-dev-utils 1.2.4__tar.gz → 1.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/PKG-INFO +81 -53
  2. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/README.md +80 -52
  3. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/README_cn.md +81 -53
  4. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/pyproject.toml +1 -1
  5. langchain_dev_utils-1.2.5/src/langchain_dev_utils/__init__.py +1 -0
  6. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/factory.py +2 -1
  7. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -18
  8. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +28 -14
  9. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/base.py +59 -47
  10. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/types.py +2 -1
  11. langchain_dev_utils-1.2.5/tests/__init__.py +38 -0
  12. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_agent.py +0 -15
  13. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_chat_models.py +1 -12
  14. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_human_in_the_loop.py +1 -6
  15. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_load_embbeding.py +1 -19
  16. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_load_model.py +1 -23
  17. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_model_tool_emulator.py +0 -9
  18. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_plan_middleware.py +0 -5
  19. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_router_model.py +0 -15
  20. langchain_dev_utils-1.2.4/src/langchain_dev_utils/__init__.py +0 -1
  21. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/.gitignore +0 -0
  22. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/.python-version +0 -0
  23. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/.vscode/settings.json +0 -0
  24. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/LICENSE +0 -0
  25. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/__init__.py +0 -0
  26. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/file_system.py +0 -0
  27. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  28. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  29. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  30. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  31. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  32. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  33. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/plan.py +0 -0
  34. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/agents/wrap.py +0 -0
  35. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  36. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  37. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  38. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/embeddings/base.py +0 -0
  39. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  40. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/content.py +0 -0
  41. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/message_convert/format.py +0 -0
  42. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  43. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  44. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  45. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/pipeline/types.py +0 -0
  46. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/py.typed +0 -0
  47. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  48. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  49. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  50. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_messages.py +0 -0
  51. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_pipline.py +0 -0
  52. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_tool_calling.py +0 -0
  53. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/tests/test_wrap_agent.py +0 -0
  54. {langchain_dev_utils-1.2.4 → langchain_dev_utils-1.2.5}/uv.lock +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-dev-utils
3
- Version: 1.2.4
3
+ Version: 1.2.5
4
4
  Summary: A practical utility library for LangChain and LangGraph development
5
5
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
6
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -57,19 +57,23 @@ Mainly consists of the following two functions:
57
57
  - `register_model_provider`: Register a chat model provider
58
58
  - `load_chat_model`: Load a chat model
59
59
 
60
- `register_model_provider` parameter description:
60
+ **`register_model_provider` Parameters:**
61
61
 
62
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
63
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
64
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
65
- - `provider_profile`: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
66
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
62
+ | Parameter | Type | Required | Default | Description |
63
+ |-----------|------|----------|---------|-------------|
64
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
65
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
66
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
67
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
68
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
67
69
 
68
- `load_chat_model` parameter description:
70
+ **`load_chat_model` Parameters:**
69
71
 
70
- - `model`: Chat model name, type str
71
- - `model_provider`: Chat model provider name, type str, optional
72
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
72
+ | Parameter | Type | Required | Default | Description |
73
+ |-----------|------|----------|---------|-------------|
74
+ | `model` | str | Yes | - | Chat model name |
75
+ | `model_provider` | str | No | - | Chat model provider name |
76
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
73
77
 
74
78
  Example for integrating a qwen3-4b model deployed using `vllm`:
75
79
 
@@ -98,17 +102,21 @@ Mainly consists of the following two functions:
98
102
  - `register_embeddings_provider`: Register an embedding model provider
99
103
  - `load_embeddings`: Load an embedding model
100
104
 
101
- `register_embeddings_provider` parameter description:
105
+ **`register_embeddings_provider` Parameters:**
102
106
 
103
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
104
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
105
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
107
+ | Parameter | Type | Required | Default | Description |
108
+ |-----------|------|----------|---------|-------------|
109
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
110
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
111
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
106
112
 
107
- `load_embeddings` parameter description:
113
+ **`load_embeddings` Parameters:**
108
114
 
109
- - `model`: Embedding model name, type str
110
- - `provider`: Embedding model provider name, type str, optional
111
- - `kwargs`: Other additional parameters
115
+ | Parameter | Type | Required | Default | Description |
116
+ |-----------|------|----------|---------|-------------|
117
+ | `model` | str | Yes | - | Embedding model name |
118
+ | `provider` | str | No | - | Embedding model provider name |
119
+ | `kwargs` | dict | No | - | Other additional parameters |
112
120
 
113
121
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
114
122
 
@@ -142,9 +150,11 @@ Includes the following features:
142
150
 
143
151
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
144
152
 
145
- `merge_ai_message_chunk` parameter description:
153
+ **`merge_ai_message_chunk` Parameters:**
146
154
 
147
- - `chunks`: List of AIMessageChunk
155
+ | Parameter | Type | Required | Default | Description |
156
+ |-----------|------|----------|---------|-------------|
157
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
148
158
 
149
159
  ```python
150
160
  chunks = list(model.stream("Hello"))
@@ -155,14 +165,13 @@ merged = merge_ai_message_chunk(chunks)
155
165
 
156
166
  For a list, you can use `format_sequence` to format it.
157
167
 
158
- `format_sequence` parameter description:
168
+ **`format_sequence` Parameters:**
159
169
 
160
- - `inputs`: A list containing any of the following types:
161
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
162
- - langchain_core.documents.Document
163
- - str
164
- - `separator`: String used to join the content, defaults to "-".
165
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
170
+ | Parameter | Type | Required | Default | Description |
171
+ |-----------|------|----------|---------|-------------|
172
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
173
+ | `separator` | str | No | "-" | String used to join the content |
174
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
166
175
 
167
176
  ```python
168
177
  text = format_sequence([
@@ -185,14 +194,18 @@ Includes the following features:
185
194
 
186
195
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
187
196
 
188
- `has_tool_calling` parameter description:
197
+ **`has_tool_calling` Parameters:**
189
198
 
190
- - `message`: AIMessage object
199
+ | Parameter | Type | Required | Default | Description |
200
+ |-----------|------|----------|---------|-------------|
201
+ | `message` | AIMessage | Yes | - | AIMessage object |
191
202
 
192
- `parse_tool_calling` parameter description:
203
+ **`parse_tool_calling` Parameters:**
193
204
 
194
- - `message`: AIMessage object
195
- - `first_tool_call_only`: Whether to only check the first tool call
205
+ | Parameter | Type | Required | Default | Description |
206
+ |-----------|------|----------|---------|-------------|
207
+ | `message` | AIMessage | Yes | - | AIMessage object |
208
+ | `first_tool_call_only` | bool | No | False | Whether to only check the first tool call |
196
209
 
197
210
  ```python
198
211
  import datetime
@@ -245,6 +258,13 @@ Includes the following features:
245
258
 
246
259
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
247
260
 
261
+ **`create_agent` Parameters:**
262
+
263
+ | Parameter | Type | Required | Default | Description |
264
+ |-----------|------|----------|---------|-------------|
265
+ | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
266
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
267
+
248
268
  Usage example:
249
269
 
250
270
  ```python
@@ -293,15 +313,19 @@ Includes the following features:
293
313
  Sequential graph orchestration:
294
314
  Uses `create_sequential_pipeline`, supported parameters:
295
315
 
296
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
297
- - `state_schema`: State Schema for the final generated graph
298
- - `graph_name`: Name of the final generated graph (optional)
299
- - `context_schema`: Context Schema for the final generated graph (optional)
300
- - `input_schema`: Input Schema for the final generated graph (optional)
301
- - `output_schema`: Output Schema for the final generated graph (optional)
302
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
303
- - `store`: LangGraph persistence Store (optional)
304
- - `cache`: LangGraph Cache (optional)
316
+ **`create_sequential_pipeline` Parameters:**
317
+
318
+ | Parameter | Type | Required | Default | Description |
319
+ |-----------|------|----------|---------|-------------|
320
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
321
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
322
+ | `graph_name` | str | No | - | Name of the final generated graph |
323
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
324
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
325
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
326
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
327
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
328
+ | `cache` | BaseCache | No | - | LangGraph Cache |
305
329
 
306
330
  ```python
307
331
  from langchain.agents import AgentState
@@ -350,16 +374,20 @@ print(response)
350
374
  Parallel graph orchestration:
351
375
  Uses `create_parallel_pipeline`, supported parameters:
352
376
 
353
- - `sub_graphs`: List of state graphs to combine
354
- - `state_schema`: State Schema for the final generated graph
355
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
356
- - `graph_name`: Name of the final generated graph (optional)
357
- - `context_schema`: Context Schema for the final generated graph (optional)
358
- - `input_schema`: Input Schema for the final generated graph (optional)
359
- - `output_schema`: Output Schema for the final generated graph (optional)
360
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
361
- - `store`: LangGraph persistence Store (optional)
362
- - `cache`: LangGraph Cache (optional)
377
+ **`create_parallel_pipeline` Parameters:**
378
+
379
+ | Parameter | Type | Required | Default | Description |
380
+ |-----------|------|----------|---------|-------------|
381
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
382
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
383
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
384
+ | `graph_name` | str | No | - | Name of the final generated graph |
385
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
386
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
387
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
388
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
389
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
390
+ | `cache` | BaseCache | No | - | LangGraph Cache |
363
391
 
364
392
  ```python
365
393
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -398,4 +426,4 @@ print(response)
398
426
 
399
427
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
400
428
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
401
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
429
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
@@ -41,19 +41,23 @@ Mainly consists of the following two functions:
41
41
  - `register_model_provider`: Register a chat model provider
42
42
  - `load_chat_model`: Load a chat model
43
43
 
44
- `register_model_provider` parameter description:
44
+ **`register_model_provider` Parameters:**
45
45
 
46
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
47
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
48
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
49
- - `provider_profile`: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
50
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
46
+ | Parameter | Type | Required | Default | Description |
47
+ |-----------|------|----------|---------|-------------|
48
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
49
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
50
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
51
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
52
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
51
53
 
52
- `load_chat_model` parameter description:
54
+ **`load_chat_model` Parameters:**
53
55
 
54
- - `model`: Chat model name, type str
55
- - `model_provider`: Chat model provider name, type str, optional
56
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
56
+ | Parameter | Type | Required | Default | Description |
57
+ |-----------|------|----------|---------|-------------|
58
+ | `model` | str | Yes | - | Chat model name |
59
+ | `model_provider` | str | No | - | Chat model provider name |
60
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
57
61
 
58
62
  Example for integrating a qwen3-4b model deployed using `vllm`:
59
63
 
@@ -82,17 +86,21 @@ Mainly consists of the following two functions:
82
86
  - `register_embeddings_provider`: Register an embedding model provider
83
87
  - `load_embeddings`: Load an embedding model
84
88
 
85
- `register_embeddings_provider` parameter description:
89
+ **`register_embeddings_provider` Parameters:**
86
90
 
87
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
88
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
89
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
91
+ | Parameter | Type | Required | Default | Description |
92
+ |-----------|------|----------|---------|-------------|
93
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
94
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
95
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
90
96
 
91
- `load_embeddings` parameter description:
97
+ **`load_embeddings` Parameters:**
92
98
 
93
- - `model`: Embedding model name, type str
94
- - `provider`: Embedding model provider name, type str, optional
95
- - `kwargs`: Other additional parameters
99
+ | Parameter | Type | Required | Default | Description |
100
+ |-----------|------|----------|---------|-------------|
101
+ | `model` | str | Yes | - | Embedding model name |
102
+ | `provider` | str | No | - | Embedding model provider name |
103
+ | `kwargs` | dict | No | - | Other additional parameters |
96
104
 
97
105
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
98
106
 
@@ -126,9 +134,11 @@ Includes the following features:
126
134
 
127
135
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
128
136
 
129
- `merge_ai_message_chunk` parameter description:
137
+ **`merge_ai_message_chunk` Parameters:**
130
138
 
131
- - `chunks`: List of AIMessageChunk
139
+ | Parameter | Type | Required | Default | Description |
140
+ |-----------|------|----------|---------|-------------|
141
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
132
142
 
133
143
  ```python
134
144
  chunks = list(model.stream("Hello"))
@@ -139,14 +149,13 @@ merged = merge_ai_message_chunk(chunks)
139
149
 
140
150
  For a list, you can use `format_sequence` to format it.
141
151
 
142
- `format_sequence` parameter description:
152
+ **`format_sequence` Parameters:**
143
153
 
144
- - `inputs`: A list containing any of the following types:
145
- - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
146
- - langchain_core.documents.Document
147
- - str
148
- - `separator`: String used to join the content, defaults to "-".
149
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
154
+ | Parameter | Type | Required | Default | Description |
155
+ |-----------|------|----------|---------|-------------|
156
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
157
+ | `separator` | str | No | "-" | String used to join the content |
158
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
150
159
 
151
160
  ```python
152
161
  text = format_sequence([
@@ -169,14 +178,18 @@ Includes the following features:
169
178
 
170
179
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
171
180
 
172
- `has_tool_calling` parameter description:
181
+ **`has_tool_calling` Parameters:**
173
182
 
174
- - `message`: AIMessage object
183
+ | Parameter | Type | Required | Default | Description |
184
+ |-----------|------|----------|---------|-------------|
185
+ | `message` | AIMessage | Yes | - | AIMessage object |
175
186
 
176
- `parse_tool_calling` parameter description:
187
+ **`parse_tool_calling` Parameters:**
177
188
 
178
- - `message`: AIMessage object
179
- - `first_tool_call_only`: Whether to only check the first tool call
189
+ | Parameter | Type | Required | Default | Description |
190
+ |-----------|------|----------|---------|-------------|
191
+ | `message` | AIMessage | Yes | - | AIMessage object |
192
+ | `first_tool_call_only` | bool | No | False | Whether to only check the first tool call |
180
193
 
181
194
  ```python
182
195
  import datetime
@@ -229,6 +242,13 @@ Includes the following features:
229
242
 
230
243
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
231
244
 
245
+ **`create_agent` Parameters:**
246
+
247
+ | Parameter | Type | Required | Default | Description |
248
+ |-----------|------|----------|---------|-------------|
249
+ | `model` | str \| BaseChatModel | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
250
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
251
+
232
252
  Usage example:
233
253
 
234
254
  ```python
@@ -277,15 +297,19 @@ Includes the following features:
277
297
  Sequential graph orchestration:
278
298
  Uses `create_sequential_pipeline`, supported parameters:
279
299
 
280
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
281
- - `state_schema`: State Schema for the final generated graph
282
- - `graph_name`: Name of the final generated graph (optional)
283
- - `context_schema`: Context Schema for the final generated graph (optional)
284
- - `input_schema`: Input Schema for the final generated graph (optional)
285
- - `output_schema`: Output Schema for the final generated graph (optional)
286
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
287
- - `store`: LangGraph persistence Store (optional)
288
- - `cache`: LangGraph Cache (optional)
300
+ **`create_sequential_pipeline` Parameters:**
301
+
302
+ | Parameter | Type | Required | Default | Description |
303
+ |-----------|------|----------|---------|-------------|
304
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances) |
305
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
306
+ | `graph_name` | str | No | - | Name of the final generated graph |
307
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
308
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
309
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
310
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
311
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
312
+ | `cache` | BaseCache | No | - | LangGraph Cache |
289
313
 
290
314
  ```python
291
315
  from langchain.agents import AgentState
@@ -334,16 +358,20 @@ print(response)
334
358
  Parallel graph orchestration:
335
359
  Uses `create_parallel_pipeline`, supported parameters:
336
360
 
337
- - `sub_graphs`: List of state graphs to combine
338
- - `state_schema`: State Schema for the final generated graph
339
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
340
- - `graph_name`: Name of the final generated graph (optional)
341
- - `context_schema`: Context Schema for the final generated graph (optional)
342
- - `input_schema`: Input Schema for the final generated graph (optional)
343
- - `output_schema`: Output Schema for the final generated graph (optional)
344
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
345
- - `store`: LangGraph persistence Store (optional)
346
- - `cache`: LangGraph Cache (optional)
361
+ **`create_parallel_pipeline` Parameters:**
362
+
363
+ | Parameter | Type | Required | Default | Description |
364
+ |-----------|------|----------|---------|-------------|
365
+ | `sub_graphs` | List[StateGraph] | Yes | - | List of state graphs to combine |
366
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
367
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
368
+ | `graph_name` | str | No | - | Name of the final generated graph |
369
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
370
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
371
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
372
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
373
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
374
+ | `cache` | BaseCache | No | - | LangGraph Cache |
347
375
 
348
376
  ```python
349
377
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -382,4 +410,4 @@ print(response)
382
410
 
383
411
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
384
412
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
385
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
413
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!