langchain-dev-utils 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. langchain_dev_utils/__init__.py +1 -1
  2. langchain_dev_utils/_utils.py +9 -5
  3. langchain_dev_utils/agents/__init__.py +0 -1
  4. langchain_dev_utils/agents/factory.py +2 -10
  5. langchain_dev_utils/agents/file_system.py +1 -1
  6. langchain_dev_utils/agents/middleware/__init__.py +2 -0
  7. langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
  8. langchain_dev_utils/agents/middleware/model_router.py +37 -46
  9. langchain_dev_utils/agents/middleware/plan.py +17 -18
  10. langchain_dev_utils/agents/middleware/summarization.py +6 -4
  11. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  12. langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
  13. langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
  14. langchain_dev_utils/agents/plan.py +1 -1
  15. langchain_dev_utils/agents/wrap.py +8 -20
  16. langchain_dev_utils/chat_models/adapters/openai_compatible.py +105 -59
  17. langchain_dev_utils/chat_models/base.py +30 -15
  18. langchain_dev_utils/chat_models/types.py +6 -3
  19. langchain_dev_utils/embeddings/base.py +35 -18
  20. langchain_dev_utils/message_convert/__init__.py +0 -1
  21. langchain_dev_utils/message_convert/content.py +8 -11
  22. langchain_dev_utils/message_convert/format.py +2 -2
  23. langchain_dev_utils/pipeline/parallel.py +10 -41
  24. langchain_dev_utils/pipeline/sequential.py +6 -21
  25. langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
  26. langchain_dev_utils/tool_calling/utils.py +3 -3
  27. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/METADATA +24 -119
  28. langchain_dev_utils-1.2.8.dist-info/RECORD +37 -0
  29. langchain_dev_utils-1.2.6.dist-info/RECORD +0 -36
  30. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/WHEEL +0 -0
  31. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/pipeline/parallel.py
@@ -38,41 +38,27 @@ def create_parallel_pipeline(
  sub_graphs: List of sub-graphs to execute in parallel
  state_schema: state schema of the final constructed graph
  graph_name: Name of the final constructed graph
- branches_fn: Optional function to determine which sub-graphs to execute in parallel
+ branches_fn: Optional function to determine which sub-graphs to execute
+     in parallel
  context_schema: context schema of the final constructed graph
  input_schema: input schema of the final constructed graph
  output_schema: output schema of the final constructed graph
- checkpointer: Optional LangGraph checkpointer for the final constructed graph
+ checkpointer: Optional LangGraph checkpointer for the final constructed
+     graph
  store: Optional LangGraph store for the final constructed graph
  cache: Optional LangGraph cache for the final constructed graph

  Returns:
- CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state graph of the pipeline.
+ CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+     graph of the pipeline.

  Example:
- Basic parallel pipeline with multiple specialized agents:
+ # Basic parallel pipeline: multiple specialized agents run concurrently
  >>> from langchain_dev_utils.pipeline import create_parallel_pipeline
  >>>
  >>> graph = create_parallel_pipeline(
  ...     sub_graphs=[
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_time],
- ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
- ...             name="time_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_weather],
- ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
- ...             name="weather_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_user],
- ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
- ...             name="user_agent",
- ...         ),
+ ...         time_agent, weather_agent, user_agent
  ...     ],
  ...     state_schema=AgentState,
  ...     graph_name="parallel_agents_pipeline",
@@ -80,27 +66,10 @@ def create_parallel_pipeline(
  >>>
  >>> response = graph.invoke({"messages": [HumanMessage("Hello")]})

- set branch_fn:
+ # Dynamic parallel pipeline: decide which agents to run based on conditional branches
  >>> graph = create_parallel_pipeline(
  ...     sub_graphs=[
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_time],
- ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
- ...             name="time_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_weather],
- ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
- ...             name="weather_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_user],
- ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
- ...             name="user_agent",
- ...         ),
+ ...         time_agent, weather_agent, user_agent
  ...     ],
  ...     state_schema=AgentState,
  ...     branches_fn=lambda state: [
langchain_dev_utils/pipeline/sequential.py
@@ -35,37 +35,22 @@ def create_sequential_pipeline(
  context_schema: context schema of the final constructed graph
  input_schema: input schema of the final constructed graph
  output_schema: output schema of the final constructed graph
- checkpointer: Optional LangGraph checkpointer for the final constructed graph
+ checkpointer: Optional LangGraph checkpointer for the final constructed
+     graph
  store: Optional LangGraph store for the final constructed graph
  cache: Optional LangGraph cache for the final constructed graph

  Returns:
- CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state graph of the pipeline.
+ CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+     graph of the pipeline.

  Example:
- Basic sequential pipeline with multiple specialized agents:
+ # Basic sequential pipeline with multiple specialized agents:
  >>> from langchain_dev_utils.pipeline import create_sequential_pipeline
  >>>
  >>> graph = create_sequential_pipeline(
  ...     sub_graphs=[
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_time],
- ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
- ...             name="time_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_weather],
- ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
- ...             name="weather_agent",
- ...         ),
- ...         create_agent(
- ...             model="vllm:qwen3-4b",
- ...             tools=[get_current_user],
- ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
- ...             name="user_agent",
- ...         ),
+ ...         time_agent, weather_agent, user_agent
  ...     ],
  ...     state_schema=AgentState,
  ...     graph_name="sequential_agents_pipeline",
langchain_dev_utils/tool_calling/human_in_the_loop.py
@@ -133,7 +133,7 @@ def human_in_the_loop(
  If `func` is None, returns a decorator that will decorate the target function.

  Example:
- Basic usage with default handler:
+ # Basic usage with default handler:
  >>> from langchain_dev_utils.tool_calling import human_in_the_loop
  >>> from langchain_core.tools import tool
  >>> import datetime
@@ -144,10 +144,10 @@ def human_in_the_loop(
  ...     \"\"\"Get current timestamp\"\"\"
  ...     return str(datetime.datetime.now().timestamp())

- Usage with custom handler:
+ # Usage with custom handler:
  >>> def custom_handler(params: InterruptParams) -> Any:
  ...     response = interrupt(
- ...         f"I am about to invoke tool '{params['tool_call_name']}' with arguments {params['tool_call_args']}. Please confirm whether to proceed."
+ ...         # Please add your custom interrupt response content here
  ...     )
  ...     if response["type"] == "accept":
  ...         return params["tool"].invoke(params["tool_call_args"])
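For reference, a minimal, self-contained sketch of a complete custom handler in the style of the docstring above. The `handler=` keyword, the decorator stacking, and the reject branch are assumptions for illustration, not confirmed by this diff:

```python
# Sketch only: fills in the placeholder above with a concrete prompt and a
# reject branch. The `handler=` keyword and decorator order are assumed.
import datetime
from typing import Any

from langchain_core.tools import tool
from langgraph.types import interrupt

from langchain_dev_utils.tool_calling import human_in_the_loop


def custom_handler(params) -> Any:  # params: InterruptParams, per the docstring
    response = interrupt(
        f"About to invoke tool '{params['tool_call_name']}' "
        f"with arguments {params['tool_call_args']}. Proceed?"
    )
    if response["type"] == "accept":
        return params["tool"].invoke(params["tool_call_args"])
    return "Tool call rejected by the user."  # assumed fallback behavior


@human_in_the_loop(handler=custom_handler)  # keyword name is an assumption
@tool
def get_timestamp() -> str:
    """Get current timestamp"""
    return str(datetime.datetime.now().timestamp())
```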
@@ -219,7 +219,7 @@ def human_in_the_loop_async(
  If `func` is None, returns a decorator that will decorate the target function.

  Example:
- Basic usage with default handler:
+ # Basic usage with default handler:
  >>> from langchain_dev_utils.tool_calling import human_in_the_loop_async
  >>> from langchain_core.tools import tool
  >>> import asyncio
@@ -232,10 +232,10 @@ def human_in_the_loop_async(
  ...     await asyncio.sleep(1)
  ...     return str(datetime.datetime.now().timestamp())

- Usage with custom handler:
+ # Usage with custom handler:
  >>> async def custom_handler(params: InterruptParams) -> Any:
  ...     response = interrupt(
- ...         f"I am about to invoke tool '{params['tool_call_name']}' with arguments {params['tool_call_args']}. Please confirm whether to proceed."
+ ...         ...  # Please add your custom interrupt response content here
  ...     )
  ...     if response["type"] == "accept":
  ...         return await params["tool"].ainvoke(params["tool_call_args"])
langchain_dev_utils/tool_calling/utils.py
@@ -16,7 +16,7 @@ def has_tool_calling(message: AIMessage) -> bool:
  bool: True if message is an AIMessage with tool calls, False otherwise

  Example:
- Check for tool calls in response:
+ # Check for tool calls in response:
  >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
  >>> response = model.invoke("What time is it now?")
  >>> if has_tool_calling(response):
@@ -50,14 +50,14 @@ def parse_tool_calling(
  Union[tuple[str, dict], list[tuple[str, dict]]]: The tool call name and args

  Example:
- Parse single tool call:
+ # Parse single tool call:
  >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
  >>> response = model.invoke("What time is it now?")
  >>> response
  >>> if has_tool_calling(response):
  ...     tool_name, tool_args = parse_tool_calling(response, first_tool_call_only=True)

- Parse multiple tool calls:
+ # Parse multiple tool calls:
  >>> if has_tool_calling(response):
  ...     tool_calls = parse_tool_calling(response)
  """
{langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-dev-utils
- Version: 1.2.6
+ Version: 1.2.8
  Summary: A practical utility library for LangChain and LangGraph development
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -11,6 +11,7 @@ Requires-Python: >=3.11
  Requires-Dist: langchain>=1.1.0
  Requires-Dist: langgraph>=1.0.0
  Provides-Extra: standard
+ Requires-Dist: json-repair>=0.53.1; extra == 'standard'
  Requires-Dist: langchain-openai; extra == 'standard'
  Description-Content-Type: text/markdown

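The new `json-repair` requirement backs the `ToolCallRepairMiddleware` added in this release (see `tool_call_repair.py` in the file list). A minimal sketch of the kind of repair that library performs; this illustrates the dependency, not the middleware's internals, which this diff does not show:

```python
# json-repair turns malformed JSON (typical of invalid tool-call arguments)
# back into parseable JSON. The example input is illustrative.
from json_repair import repair_json

broken_args = '{"city": "New York", "days": 3,'  # truncated, trailing-comma JSON
print(repair_json(broken_args))  # '{"city": "New York", "days": 3}'
```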
@@ -57,24 +58,6 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model

- **`register_model_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
- | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
- | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
- | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling` etc.). |
- | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
-
- **`load_chat_model` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Chat model name |
- | `model_provider` | str | No | - | Chat model provider name |
- | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
-
  Example for integrating a qwen3-4b model deployed using `vllm`:

  ```python
@@ -102,22 +85,6 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model

- **`register_embeddings_provider` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
- | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
- | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
-
- **`load_embeddings` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Embedding model name |
- | `provider` | str | No | - | Embedding model provider name |
- | `kwargs` | dict | No | - | Other additional parameters |
-
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:

  ```python
@@ -150,12 +117,6 @@ Includes the following features:

  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.

- **`merge_ai_message_chunk` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
-
  ```python
  from langchain_dev_utils.message_convert import merge_ai_message_chunk

@@ -167,14 +128,6 @@ merged = merge_ai_message_chunk(chunks)

  For a list, you can use `format_sequence` to format it.

- **`format_sequence` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
- | `separator` | str | No | "-" | String used to join the content |
- | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
-
  ```python
  from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
@@ -197,19 +150,6 @@ Includes the following features:

  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.

- **`has_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
-
- **`parse_tool_calling` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `message` | AIMessage | Yes | - | AIMessage object |
- | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
-
  ```python
  import datetime
  from langchain_core.tools import tool
@@ -259,14 +199,7 @@ Includes the following features:

  #### 4.1 Agent Factory Functions

- In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
-
- **`create_agent` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
- | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+ In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).

  Usage example:

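The usage example that follows in the METADATA is truncated in this hunk; as a stand-in, a minimal sketch under stated assumptions. The import path `langchain_dev_utils.agents` and the prior `vllm` provider registration are assumptions, not shown in this diff:

```python
# Sketch: the library's create_agent accepts "provider:model" strings resolved
# via load_chat_model. The import path below is an assumption.
from langchain_dev_utils.agents import create_agent

agent = create_agent(
    "vllm:qwen3-4b",  # requires register_model_provider("vllm", ...) beforehand
    name="demo-agent",
)
response = agent.invoke({"messages": [{"role": "user", "content": "Hello"}]})
print(response)
```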
@@ -281,24 +214,26 @@ print(response)

  #### 4.2 Middleware

- Provides some commonly used middleware components. Below are examples of `SummarizationMiddleware` and `PlanMiddleware`.
+ Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.

- `SummarizationMiddleware` is used for agent summarization.
+ `ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.

  `PlanMiddleware` is used for agent planning.

  ```python
  from langchain_dev_utils.agents.middleware import (
-     SummarizationMiddleware,
+     ToolCallRepairMiddleware,
      PlanMiddleware,
  )

- agent=create_agent(
+ agent = create_agent(
      "vllm:qwen3-4b",
      name="plan-agent",
-     middleware=[PlanMiddleware(), SummarizationMiddleware(model="vllm:qwen3-4b")]
+     middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
+         use_read_plan_tool=False
+     )]
  )
- response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]}))
+ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
  print(response)
  ```

@@ -306,29 +241,14 @@ print(response)

  ### 5. **State Graph Orchestration**

- Includes the following features:
+ Includes the following capabilities:

  - Sequential graph orchestration
  - Parallel graph orchestration

  #### 5.1 Sequential Graph Orchestration

- Sequential graph orchestration:
- Uses `create_sequential_pipeline`, supported parameters:
-
- **`create_sequential_pipeline` Parameters:**
-
- | Parameter | Type | Required | Default | Description |
- |-----------|------|----------|---------|-------------|
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
- | `graph_name` | str | No | - | Name of the final generated graph |
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
- | `store` | BaseStore | No | - | LangGraph persistence Store |
- | `cache` | BaseCache | No | - | LangGraph Cache |
+ Use `create_sequential_pipeline` to orchestrate multiple subgraphs in sequential order:

  ```python
  from langchain.agents import AgentState
@@ -343,25 +263,25 @@ register_model_provider(
      base_url="http://localhost:8000/v1",
  )

- # Build sequential pipeline (all sub-graphs execute sequentially)
+ # Build a sequential pipeline (all subgraphs executed in order)
  graph = create_sequential_pipeline(
      sub_graphs=[
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_time],
-             system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
+             system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
              name="time_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_weather],
-             system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
+             system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
              name="weather_agent",
          ),
          create_agent(
              model="vllm:qwen3-4b",
              tools=[get_current_user],
-             system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
+             system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
              name="user_agent",
          ),
      ],
374
294
 
375
295
  #### 5.2 Parallel Graph Orchestration
376
296
 
377
- Parallel graph orchestration:
378
- Uses `create_parallel_pipeline`, supported parameters:
379
-
380
- **`create_parallel_pipeline` Parameters:**
381
-
382
- | Parameter | Type | Required | Default | Description |
383
- |-----------|------|----------|---------|-------------|
384
- | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
385
- | `state_schema` | type | Yes | - | State Schema for the final generated graph |
386
- | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
387
- | `graph_name` | str | No | - | Name of the final generated graph |
388
- | `context_schema` | type | No | - | Context Schema for the final generated graph |
389
- | `input_schema` | type | No | - | Input Schema for the final generated graph |
390
- | `output_schema` | type | No | - | Output Schema for the final generated graph |
391
- | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
392
- | `store` | BaseStore | No | - | LangGraph persistence Store |
393
- | `cache` | BaseCache | No | - | LangGraph Cache |
297
+ Use `create_parallel_pipeline` to orchestrate multiple subgraphs in parallel:
394
298
 
395
299
  ```python
396
300
  from langchain_dev_utils.pipeline import create_parallel_pipeline
397
301
 
398
- # Build parallel pipeline (all sub-graphs execute in parallel)
302
+ # Build a parallel pipeline (all subgraphs executed concurrently)
399
303
  graph = create_parallel_pipeline(
400
304
  sub_graphs=[
401
305
  create_agent(
402
306
  model="vllm:qwen3-4b",
403
307
  tools=[get_current_time],
404
- system_prompt="You are a time query assistant, can only answer the current time. If the question is unrelated to time, please directly answer that you cannot answer.",
308
+ system_prompt="You are a time-query assistant. You can only answer questions about the current time. If the question is unrelated to time, respond with 'I cannot answer that.'",
405
309
  name="time_agent",
406
310
  ),
407
311
  create_agent(
408
312
  model="vllm:qwen3-4b",
409
313
  tools=[get_current_weather],
410
- system_prompt="You are a weather query assistant, can only answer the current weather. If the question is unrelated to weather, please directly answer that you cannot answer.",
314
+ system_prompt="You are a weather-query assistant. You can only answer questions about the current weather. If the question is unrelated to weather, respond with 'I cannot answer that.'",
411
315
  name="weather_agent",
412
316
  ),
413
317
  create_agent(
414
318
  model="vllm:qwen3-4b",
415
319
  tools=[get_current_user],
416
- system_prompt="You are a user query assistant, can only answer the current user. If the question is unrelated to user, please directly answer that you cannot answer.",
320
+ system_prompt="You are a user-query assistant. You can only answer questions about the current user. If the question is unrelated to the user, respond with 'I cannot answer that.'",
417
321
  name="user_agent",
418
322
  ),
419
323
  ],
420
324
  state_schema=AgentState,
421
325
  )
326
+
422
327
  response = graph.invoke({"messages": [HumanMessage("Hello")]})
423
328
  print(response)
424
329
  ```
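The parameter table removed above described `branches_fn` as a function returning a list of `Send` objects that select which sub-graphs run. A hedged sketch of dynamic branching on top of the pipeline above, assuming the three agents are bound to the names used in the updated docstring (`time_agent`, `weather_agent`, `user_agent`); the routing condition is illustrative only:

```python
# Dynamic branching sketch: per the removed parameter table, branches_fn
# returns langgraph Send objects naming the sub-graphs that should run.
from langgraph.types import Send


def branches_fn(state: AgentState) -> list[Send]:
    question = state["messages"][-1].content
    # Illustrative routing: fan out only to the agents relevant to the question.
    targets = (
        ["time_agent"] if "time" in question.lower()
        else ["weather_agent", "user_agent"]
    )
    return [Send(name, state) for name in targets]


graph = create_parallel_pipeline(
    sub_graphs=[time_agent, weather_agent, user_agent],  # the agents built above
    state_schema=AgentState,
    branches_fn=branches_fn,
)
response = graph.invoke({"messages": [HumanMessage("What time is it?")]})
```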
@@ -429,4 +334,4 @@ print(response)

  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
langchain_dev_utils-1.2.8.dist-info/RECORD
@@ -0,0 +1,37 @@
+ langchain_dev_utils/__init__.py,sha256=CfVXm0wwlKPW0khOcwhWw61TpgtZiLijCePsAIOK3aU,22
+ langchain_dev_utils/_utils.py,sha256=MFEzR1BjXMj6HEVwt2x2omttFuDJ_rYAEbNqe99r9pM,1338
+ langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain_dev_utils/agents/__init__.py,sha256=PJ-lSDZv_AXMYA3H4fx-HzJa14tPbkGmq1HX8LNfaPo,125
+ langchain_dev_utils/agents/factory.py,sha256=XdGjktksfTDys7X4SgfPrQz10HUo5fTNAWESDQenIlE,3728
+ langchain_dev_utils/agents/file_system.py,sha256=Yk3eetREE26WNrnTWLoiDUpOyCJ-rhjlfFDk6foLa1E,8468
+ langchain_dev_utils/agents/plan.py,sha256=WwhoiJBmVYVI9bT8HfjCzTJ_SIp9WFil0gOeznv2omQ,6497
+ langchain_dev_utils/agents/wrap.py,sha256=RuchoH_VotPmKFuYEn2SXoSgNxZhSA9jKM0Iv_8oHLk,4718
+ langchain_dev_utils/agents/middleware/__init__.py,sha256=sAd0gehREpt0MB5deX5_YmTJPBoqmeSsjNQ-ta6R3EM,768
+ langchain_dev_utils/agents/middleware/model_fallback.py,sha256=nivtXXF4cwyOBv6p7RW12nXtNg87wjTWxO3BKIYiroI,1674
+ langchain_dev_utils/agents/middleware/model_router.py,sha256=pOK-4PNTLrmjaQA9poHoQnsaVwoX0JeJrLVysulv9iU,7631
+ langchain_dev_utils/agents/middleware/plan.py,sha256=0qDCmenxgY_zrwMfOyYlgLfhYNw-HszNLeeOkfj14NA,16002
+ langchain_dev_utils/agents/middleware/summarization.py,sha256=IoZ2PM1OC3AXwf0DWpfreuPOAipeiYu0KPmAABWXuY0,3087
+ langchain_dev_utils/agents/middleware/tool_call_repair.py,sha256=oZF0Oejemqs9kSn8xbW79FWyVVarL4IGCz0gpqYBkFM,3529
+ langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=OgtPhqturaWzF4fRSJ3f_IXvIrYrrAjlpOC5zmLtrkY,2031
+ langchain_dev_utils/agents/middleware/tool_selection.py,sha256=dRH5ejR6N02Djwxt6Gd63MYkg6SV5pySlzaRt53OoZk,3113
+ langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
+ langchain_dev_utils/chat_models/base.py,sha256=CVMfgqMRnIKv8z4babusa2c4RKVuiWTL39mPD8cHAf4,11880
+ langchain_dev_utils/chat_models/types.py,sha256=w9Zu2I_HtpWQ1jNEUE9QkEunxD6UUtIh0hGJVb7b5gk,690
+ langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=hH713hs4LRfqUbYlqJKR0geJUjXkQAbU6-segYyuLCE,21599
+ langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
+ langchain_dev_utils/embeddings/base.py,sha256=l4uCB5ecr3GAkfYGpYxqamOPIM6fkP1H_QK-277YEic,9295
+ langchain_dev_utils/message_convert/__init__.py,sha256=ZGrHGXPKMrZ_p9MqfIVZ4jgbEyb7aC4Q7X-muuThIYU,457
+ langchain_dev_utils/message_convert/content.py,sha256=LhrFXL1zYkkpp4ave6SBorDLig5xnllQ2VYCgFz-eR4,7681
+ langchain_dev_utils/message_convert/format.py,sha256=1TOcJ09atH7LRtn_IIuBshKDXAyqoy3Q9b0Po-S-F9g,2377
+ langchain_dev_utils/pipeline/__init__.py,sha256=eE6WktaLHDkqMeXDIDaLtm-OPTwtsX_Av8iK9uYrceo,186
+ langchain_dev_utils/pipeline/parallel.py,sha256=nwZWbdSNeyanC9WufoJBTceotgT--UnPOfStXjgNMOc,5271
+ langchain_dev_utils/pipeline/sequential.py,sha256=sYJXQzVHDKUc-UV-HMv38JTPnse1A7sRM0vqSdpHK0k,3850
+ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV2q58fDHs,112
+ langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
+ langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
+ langchain_dev_utils/tool_calling/utils.py,sha256=W2ZRRMhn7SHHZxFfCXVaPIh2uFkY2XkO6EWrdRuv6VE,2757
+ langchain_dev_utils-1.2.8.dist-info/METADATA,sha256=gzvPmy60STe1Yg-qZHJVMBRPRwXZ8_3uabCEdZjeXmE,13100
+ langchain_dev_utils-1.2.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ langchain_dev_utils-1.2.8.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+ langchain_dev_utils-1.2.8.dist-info/RECORD,,
langchain_dev_utils-1.2.6.dist-info/RECORD
@@ -1,36 +0,0 @@
- langchain_dev_utils/__init__.py,sha256=vMQK58X8_YZGKzRm0ThvPAKFtpfyejGmUnDrY9RQ13w,22
- langchain_dev_utils/_utils.py,sha256=5bFs4cf3HvkMNkv35V8Sowu4YSXmfF5VNwmv_eHfkgQ,1151
- langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain_dev_utils/agents/__init__.py,sha256=e17SMQdJIQngbUCr2N1tY-yw0tD3tEnH7PSvyDmVPeQ,127
- langchain_dev_utils/agents/factory.py,sha256=JjdJwPTJpQwAlwQlBalbuGej5Jcpy2Fz6lH3EwEaxQo,3979
- langchain_dev_utils/agents/file_system.py,sha256=S6RUEmQI2eerW0gBQp0IP0X5ak5FwvqgIGRiycr2iyw,8468
- langchain_dev_utils/agents/plan.py,sha256=ydJuJLlNydheQvLPl2uCc3TBVv42YxGzPhKgtldIdIk,6497
- langchain_dev_utils/agents/wrap.py,sha256=4BWksU9DRz8c3ZHQiUi4GHwGhNysDLNs8pmLWV7BeAI,5165
- langchain_dev_utils/agents/middleware/__init__.py,sha256=cjrb8Rue5uukl9pKPF7CjSrHtcYsUBj3Mdvv2szlp7E,679
- langchain_dev_utils/agents/middleware/model_fallback.py,sha256=pXdraahOMukLgvjX70LwhrjIoEhLYQfNEwJMQHG2WPk,1673
- langchain_dev_utils/agents/middleware/model_router.py,sha256=Qb_s_FoREp11yKHdmp_ZTRxB1whsFrj86awUNR0fpCk,8461
- langchain_dev_utils/agents/middleware/plan.py,sha256=saRXhzkC2pd7LNiNclSmGJelmisbTXhhTrbSUkSkf9g,16220
- langchain_dev_utils/agents/middleware/summarization.py,sha256=BtWPJcQBssGAT0nb1c0xsGEOsb8x5sAAE6xqujYjHhY,3027
- langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=u9rV24yUB-dyc1uUfUe74B1wOGVI3TZRwxkE1bvGm18,2025
- langchain_dev_utils/agents/middleware/tool_selection.py,sha256=ZqdyK4Yhp2u3GM6B_D6U7Srca9vy1o7s6N_LrV24-dQ,3107
- langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
- langchain_dev_utils/chat_models/base.py,sha256=AYRcGViGJYsquqru_www3zt8-ZCkfzPCrw-dFF6HDts,11661
- langchain_dev_utils/chat_models/types.py,sha256=M0iCGWgXmX1f1vkymH-jNGdFQlsJS5JqpmgHctUS9jw,512
- langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=4Q8ySa7jS2_AFo0oxLoqeY_aQyPppvV-DAMLt2rmGoE,20192
- langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
- langchain_dev_utils/embeddings/base.py,sha256=lGZWbi6G1M0OcAO_d_k1QAFJm9z9gM0L4UAZ6xFtEoQ,8973
- langchain_dev_utils/message_convert/__init__.py,sha256=xwjaQ1oJoc80xy70oQI4uW3gAmgV5JymJd5hgnA6s3g,458
- langchain_dev_utils/message_convert/content.py,sha256=ApmQ7fUUBO3Ihjm2hYSWd4GrU_CvrjbWla-MA7DAFRc,7758
- langchain_dev_utils/message_convert/format.py,sha256=fh4GyyuZBTMrHeCEwdu9fOh5n8tdli1vDF44jK1i-tI,2373
- langchain_dev_utils/pipeline/__init__.py,sha256=eE6WktaLHDkqMeXDIDaLtm-OPTwtsX_Av8iK9uYrceo,186
- langchain_dev_utils/pipeline/parallel.py,sha256=fp-DZmQ470GurLcrYBKCarHM1gyCJuuT33IkgoRkFPc,7586
- langchain_dev_utils/pipeline/sequential.py,sha256=TYv0Fs8o2FsgRkWmcM3p2vMg806DopUONVMC_9yeQgk,5041
- langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV2q58fDHs,112
- langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
- langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=nbaON9806pv5tpMRQUA_Ch3HJA5HBFgzZR7kQRf6PiY,9819
- langchain_dev_utils/tool_calling/utils.py,sha256=3cNv_Zx32KxdsGn8IkxjWUzxYEEwVJeJgTZTbfSg0pA,2751
- langchain_dev_utils-1.2.6.dist-info/METADATA,sha256=PhJoxRlERmnMzRumGzLW3kwU58Cg3Tt6MiFibsfIi8U,19090
- langchain_dev_utils-1.2.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- langchain_dev_utils-1.2.6.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
- langchain_dev_utils-1.2.6.dist-info/RECORD,,