langchain-dev-utils 1.2.6__py3-none-any.whl → 1.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/_utils.py +5 -2
- langchain_dev_utils/agents/__init__.py +0 -1
- langchain_dev_utils/agents/factory.py +2 -10
- langchain_dev_utils/agents/file_system.py +1 -1
- langchain_dev_utils/agents/middleware/__init__.py +2 -0
- langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
- langchain_dev_utils/agents/middleware/model_router.py +37 -46
- langchain_dev_utils/agents/middleware/plan.py +17 -18
- langchain_dev_utils/agents/middleware/summarization.py +6 -4
- langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
- langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
- langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
- langchain_dev_utils/agents/plan.py +1 -1
- langchain_dev_utils/agents/wrap.py +8 -20
- langchain_dev_utils/chat_models/adapters/openai_compatible.py +33 -17
- langchain_dev_utils/chat_models/base.py +30 -15
- langchain_dev_utils/chat_models/types.py +0 -1
- langchain_dev_utils/embeddings/base.py +35 -18
- langchain_dev_utils/message_convert/__init__.py +0 -1
- langchain_dev_utils/message_convert/content.py +8 -11
- langchain_dev_utils/message_convert/format.py +2 -2
- langchain_dev_utils/pipeline/parallel.py +10 -41
- langchain_dev_utils/pipeline/sequential.py +6 -21
- langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
- langchain_dev_utils/tool_calling/utils.py +3 -3
- {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/METADATA +24 -119
- langchain_dev_utils-1.2.7.dist-info/RECORD +37 -0
- langchain_dev_utils-1.2.6.dist-info/RECORD +0 -36
- {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/chat_models/base.py

@@ -5,7 +5,7 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.utils import from_env
 
 from langchain_dev_utils._utils import (
-
+    _check_pkg_install,
     _get_base_url_field_name,
 )
 

@@ -95,11 +95,22 @@ def register_model_provider(
         string identifiers for supported providers.
 
     Args:
-        provider_name: The name of the model provider, used as an identifier for
-
-
-
-
+        provider_name: The name of the model provider, used as an identifier for
+            loading models later.
+        chat_model: The chat model, which can be either a `ChatModel` instance or
+            a string (currently only `"openai-compatible"` is supported).
+        base_url: The API endpoint URL of the model provider (optional; applicable
+            to both `chat_model` types, but primarily used when `chat_model` is a
+            string with value `"openai-compatible"`).
+        model_profiles: Declares the capabilities and parameters supported by each
+            model provided by this provider (optional; applicable to both `chat_model`
+            types). The configuration corresponding to the `model_name` will be loaded
+            and assigned to `model.profile` (e.g., fields such as `max_input_tokens`,
+            `tool_calling`etc.).
+        compatibility_options: Compatibility options for the model provider (optional;
+            only effective when `chat_model` is a string with value `"openai-compatible"`).
+            Used to declare support for OpenAI-compatible features (e.g., `tool_choice`
+            strategies, JSON mode, etc.) to ensure correct functional adaptation.
     Raises:
         ValueError: If base_url is not provided when chat_model is a string,
             or if chat_model string is not in supported providers

@@ -109,19 +120,23 @@ def register_model_provider(
         >>> from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-
+        # Register custom model provider
         >>> register_model_provider("fakechat", FakeChatModel)
         >>> model = load_chat_model(model="fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
-
-        >>> register_model_provider(
+        # Using with OpenAI-compatible API:
+        >>> register_model_provider(
+        ...     provider_name="vllm",
+        ...     chat_model="openai-compatible",
+        ...     base_url="http://localhost:8000/v1",
+        ... )
         >>> model = load_chat_model(model="vllm:qwen3-4b")
         >>> model.invoke("Hello")
     """
     base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(chat_model, str):
-
+        _check_pkg_install("langchain_openai")
         from .adapters.openai_compatible import _create_openai_compatible_model
 
         if base_url is None:
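The docstring above introduces the new `model_profiles` and `compatibility_options` parameters, but the doctest does not exercise them. Below is a minimal sketch of attaching a profile to a registered provider, assuming `model_profiles` maps model names to profile dictionaries; the field names `max_input_tokens` and `tool_calling` come from the docstring, while the endpoint, model name, and profile values are placeholders.

    from langchain_dev_utils.chat_models import register_model_provider, load_chat_model

    # Hypothetical profile mapping: one entry per model served by this provider.
    register_model_provider(
        provider_name="vllm",
        chat_model="openai-compatible",
        base_url="http://localhost:8000/v1",
        model_profiles={
            "qwen3-4b": {"max_input_tokens": 32768, "tool_calling": True},  # assumed shape
        },
    )

    model = load_chat_model("vllm:qwen3-4b")
    print(model.profile)  # per the docstring, the matching profile is assigned here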
@@ -204,7 +219,7 @@ def batch_register_model_provider(
         >>> from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-
+        # Register multiple providers
         >>> batch_register_model_provider([
         ...     {
         ...         "provider_name": "fakechat",

@@ -217,7 +232,7 @@ def batch_register_model_provider(
         ...     },
         ... ])
         >>>
-
+        # Use registered providers
         >>> model = load_chat_model("fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
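The two hunks above show only the opening and closing of the doctest; the middle of the provider list is outside this diff. A sketch of what a full call might look like, assuming each entry takes the same keys as the `register_model_provider` parameters (`provider_name`, `chat_model`, `base_url`), mirroring the embeddings variant shown later in this diff:

    from langchain_core.language_models.fake_chat_models import FakeChatModel

    from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model

    batch_register_model_provider([
        # Class-based provider (assumed key names).
        {"provider_name": "fakechat", "chat_model": FakeChatModel},
        # OpenAI-compatible provider served from a local endpoint (placeholder URL).
        {"provider_name": "vllm", "chat_model": "openai-compatible",
         "base_url": "http://localhost:8000/v1"},
    ])

    model = load_chat_model("fakechat:fake-model")
    model.invoke("Hello")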
@@ -257,16 +272,16 @@ def load_chat_model(
         BaseChatModel: Initialized chat model instance
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.chat_models import load_chat_model
         >>> model = load_chat_model("vllm:qwen3-4b")
         >>> model.invoke("hello")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> model = load_chat_model("qwen3-4b", model_provider="vllm")
         >>> model.invoke("hello")
 
-        Load model with additional parameters:
+        # Load model with additional parameters:
         >>> model = load_chat_model(
         ...     "vllm:qwen3-4b",
         ...     temperature=0.7
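Note the implementation line visible in the `register_model_provider` hunk: when `base_url` is not passed, it falls back to an environment variable named `{PROVIDER_NAME}_API_BASE`. A short sketch of relying on that fallback instead of passing `base_url` explicitly (the endpoint value is a placeholder):

    import os

    from langchain_dev_utils.chat_models import register_model_provider, load_chat_model

    # Equivalent to passing base_url=..., per the from_env fallback shown above.
    os.environ["VLLM_API_BASE"] = "http://localhost:8000/v1"

    register_model_provider(provider_name="vllm", chat_model="openai-compatible")
    model = load_chat_model("vllm:qwen3-4b")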
langchain_dev_utils/embeddings/base.py

@@ -1,10 +1,10 @@
 from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
-from langchain.embeddings.base import
+from langchain.embeddings.base import _SUPPORTED_PROVIDERS, Embeddings, init_embeddings
 from langchain_core.utils import from_env, secret_from_env
 
 from langchain_dev_utils._utils import (
-
+    _check_pkg_install,
     _get_base_url_field_name,
 )
 
@@ -60,14 +60,17 @@ def register_embeddings_provider(
 
     Args:
         provider_name: Name of the provider to register
-        embeddings_model: Either an Embeddings class or a string identifier
-
+        embeddings_model: Either an Embeddings class or a string identifier
+            for a supported provider
+        base_url: The API address of the Embedding model provider (optional,
+            valid for both types of `embeddings_model`, but mainly used when
+            `embeddings_model` is a string and is "openai-compatible")
 
     Raises:
         ValueError: If base_url is not provided when embeddings_model is a string
 
     Example:
-        Register with custom model class:
+        # Register with custom model class:
         >>> from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>

@@ -75,9 +78,11 @@ def register_embeddings_provider(
         >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
         >>> embeddings.embed_query("hello world")
 
-        Register with OpenAI-compatible API:
+        # Register with OpenAI-compatible API:
         >>> register_embeddings_provider(
-        ...     "vllm",
+        ...     "vllm",
+        ...     "openai-compatible",
+        ...     base_url="http://localhost:8000/v1"
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")

@@ -95,7 +100,7 @@ def register_embeddings_provider(
             "when embeddings_model is a string, the value must be 'openai-compatible'"
         )
 
-
+    _check_pkg_install("langchain_openai")
 
    _EMBEDDINGS_PROVIDERS_DICT.update(
        {
@@ -126,32 +131,44 @@ def batch_register_embeddings_provider(
 ):
     """Batch register embeddings providers.
 
-    This function allows you to register multiple embeddings providers at once,
-    useful when setting up applications that need to work with multiple
+    This function allows you to register multiple embeddings providers at once,
+    which is useful when setting up applications that need to work with multiple
+    embedding services.
 
     Args:
         providers: List of EmbeddingProvider dictionaries, each containing:
             - provider_name: str - Provider name
-            - embeddings_model: Union[Type[Embeddings], str] - Model class or
-
+            - embeddings_model: Union[Type[Embeddings], str] - Model class or
+              provider string
+            - base_url: The API address of the Embedding model provider
+              (optional, valid for both types of `embeddings_model`, but
+              mainly used when `embeddings_model` is a string and is
+              "openai-compatible")
 
     Raises:
         ValueError: If any of the providers are invalid
 
     Example:
-        Register multiple providers at once:
+        # Register multiple providers at once:
         >>> from langchain_dev_utils.embeddings import batch_register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>
         >>> batch_register_embeddings_provider(
         ...     [
-        ...         {
-        ...
+        ...         {
+        ...             "provider_name": "fakeembeddings",
+        ...             "embeddings_model": FakeEmbeddings,
+        ...         },
+        ...         {
+        ...             "provider_name": "vllm",
+        ...             "embeddings_model": "openai-compatible",
+        ...             "base_url": "http://localhost:8000/v1"
+        ...         },
         ...     ]
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
-        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
+        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings", size=1024)
         >>> embeddings.embed_query("hello world")
     """
     for provider in providers:
@@ -185,12 +202,12 @@ def load_embeddings(
         ValueError: If provider is not registered or API key is not found
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.embeddings import load_embeddings
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> embeddings = load_embeddings("qwen3-embedding-4b", provider="vllm")
         >>> embeddings.embed_query("hello world")
     """
langchain_dev_utils/message_convert/content.py

@@ -36,13 +36,13 @@ def convert_reasoning_content_for_ai_message(
         AIMessage: Modified AI message with reasoning content in visible content
 
     Example:
-        Basic usage with default tags:
+        # Basic usage with default tags:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_ai_message
         >>> response = model.invoke("Explain quantum computing")
         >>> response = convert_reasoning_content_for_ai_message(response)
         >>> response.content
 
-        Custom tags for reasoning content:
+        # Custom tags for reasoning content:
         >>> response = convert_reasoning_content_for_ai_message(
         ...     response, think_tag=('<reasoning>', '</reasoning>')
         ... )

@@ -77,14 +77,14 @@ def convert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process streaming response:
+        # Process streaming response:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_chunk_iterator
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for streaming:
+        # Custom tags for streaming:
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')

@@ -127,14 +127,14 @@ async def aconvert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process async streaming response:
+        # Process async streaming response:
         >>> from langchain_dev_utils.message_convert import aconvert_reasoning_content_for_chunk_iterator
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for async streaming:
+        # Custom tags for async streaming:
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')

@@ -172,12 +172,9 @@ def merge_ai_message_chunk(chunks: Sequence[AIMessageChunk]) -> AIMessage:
         AIMessage: Merged AIMessage
 
     Example:
-        Merge streaming chunks:
+        # Merge streaming chunks:
         >>> from langchain_dev_utils.message_convert import merge_ai_message_chunk
-        >>>
-        >>> for chunk in model.stream("What is the capital of France?"):
-        ...     chunks.append(chunk)
-        >>> merged_message = merge_ai_message_chunk(chunks)
+        >>> merged_message = merge_ai_message_chunk(list(model.stream("What is the capital of France?")))
         >>> merged_message.content
     """
     ai_message_chunk = cast(AIMessageChunk, reduce(lambda x, y: x + y, chunks))
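The doctests above assume a pre-existing `model`. A small end-to-end sketch tying these helpers to `load_chat_model` from earlier in this diff; the provider and model name are placeholders, and it assumes the provider returns reasoning content that the converter can surface with its default think tags.

    from langchain_dev_utils.chat_models import load_chat_model
    from langchain_dev_utils.message_convert import (
        convert_reasoning_content_for_chunk_iterator,
        merge_ai_message_chunk,
    )

    model = load_chat_model("vllm:qwen3-4b")  # placeholder provider/model

    # Stream while surfacing reasoning content, then merge the chunks back
    # into a single AIMessage.
    chunks = []
    for chunk in convert_reasoning_content_for_chunk_iterator(
        model.stream("Explain quantum computing")
    ):
        print(chunk.content, end="", flush=True)
        chunks.append(chunk)

    merged = merge_ai_message_chunk(chunks)
    print(merged.content)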
langchain_dev_utils/message_convert/format.py

@@ -34,7 +34,7 @@ def format_sequence(
         A formatted string composed of the input contents, joined by `separator`.
 
     Example:
-        Format messages with default separator:
+        # Format messages with default separator:
         >>> from langchain_dev_utils.message_convert import format_sequence
         >>> from langchain_core.messages import HumanMessage, AIMessage
         >>> messages = [

@@ -44,7 +44,7 @@ def format_sequence(
         >>> formatted = format_sequence(messages)
         >>> formatted
 
-        Format with custom separator and numbering:
+        # Format with custom separator and numbering:
         >>> formatted = format_sequence(messages, separator="---", with_num=True)
         >>> formatted
     """
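The lines that build `messages` fall between the two hunks and are not part of this diff. A sketch of a complete call, assuming `format_sequence` joins the text of each message (the message contents and the exact output format are placeholders, not shown here):

    from langchain_core.messages import AIMessage, HumanMessage

    from langchain_dev_utils.message_convert import format_sequence

    messages = [
        HumanMessage("What is the capital of France?"),
        AIMessage("The capital of France is Paris."),
    ]

    # Default separator, then a custom separator with per-item numbering.
    print(format_sequence(messages))
    print(format_sequence(messages, separator="---", with_num=True))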
langchain_dev_utils/pipeline/parallel.py

@@ -38,41 +38,27 @@ def create_parallel_pipeline(
         sub_graphs: List of sub-graphs to execute in parallel
         state_schema: state schema of the final constructed graph
         graph_name: Name of the final constructed graph
-        branches_fn: Optional function to determine which sub-graphs to execute
+        branches_fn: Optional function to determine which sub-graphs to execute
+            in parallel
         context_schema: context schema of the final constructed graph
         input_schema: input schema of the final constructed graph
         output_schema: output schema of the final constructed graph
-        checkpointer: Optional LangGraph checkpointer for the final constructed
+        checkpointer: Optional LangGraph checkpointer for the final constructed
+            graph
         store: Optional LangGraph store for the final constructed graph
         cache: Optional LangGraph cache for the final constructed graph
 
     Returns:
-        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+            graph of the pipeline.
 
     Example:
-        Basic parallel pipeline
+        # Basic parallel pipeline: multiple specialized agents run concurrently
         >>> from langchain_dev_utils.pipeline import create_parallel_pipeline
         >>>
         >>> graph = create_parallel_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     graph_name="parallel_agents_pipeline",

@@ -80,27 +66,10 @@ def create_parallel_pipeline(
         >>>
         >>> response = graph.invoke({"messages": [HumanMessage("Hello")]})
 
-
+        # Dynamic parallel pipeline: decide which agents to run based on conditional branches
         >>> graph = create_parallel_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     branches_fn=lambda state: [
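The updated doctests refer to `time_agent`, `weather_agent`, and `user_agent` without defining them; the removed lines show they were previously built inline with `create_agent`. A sketch of defining those agents up front and combining them with `branches_fn`; the `create_agent`/`AgentState` import path, the tool implementations, and the return value expected from `branches_fn` are assumptions.

    from langchain.agents import AgentState, create_agent  # assumed import path
    from langchain_core.messages import HumanMessage

    from langchain_dev_utils.pipeline import create_parallel_pipeline


    # Placeholder tools standing in for the real ones referenced in the doctest.
    def get_current_time() -> str:
        """Return the current time."""
        return "12:00"


    def get_current_weather() -> str:
        """Return the current weather."""
        return "sunny"


    def get_current_user() -> str:
        """Return the current user."""
        return "alice"


    time_agent = create_agent(model="vllm:qwen3-4b", tools=[get_current_time],
                              system_prompt="You are a time query assistant.", name="time_agent")
    weather_agent = create_agent(model="vllm:qwen3-4b", tools=[get_current_weather],
                                 system_prompt="You are a weather query assistant.", name="weather_agent")
    user_agent = create_agent(model="vllm:qwen3-4b", tools=[get_current_user],
                              system_prompt="You are a user query assistant.", name="user_agent")

    graph = create_parallel_pipeline(
        sub_graphs=[time_agent, weather_agent, user_agent],
        state_schema=AgentState,
        # Illustrative branch function: only the listed sub-graphs run for this request.
        branches_fn=lambda state: ["time_agent", "weather_agent"],
        graph_name="parallel_agents_pipeline",
    )
    response = graph.invoke({"messages": [HumanMessage("What time is it?")]})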
langchain_dev_utils/pipeline/sequential.py

@@ -35,37 +35,22 @@ def create_sequential_pipeline(
         context_schema: context schema of the final constructed graph
         input_schema: input schema of the final constructed graph
         output_schema: output schema of the final constructed graph
-        checkpointer: Optional LangGraph checkpointer for the final constructed
+        checkpointer: Optional LangGraph checkpointer for the final constructed
+            graph
         store: Optional LangGraph store for the final constructed graph
         cache: Optional LangGraph cache for the final constructed graph
 
     Returns:
-        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+            graph of the pipeline.
 
     Example:
-        Basic sequential pipeline with multiple specialized agents:
+        # Basic sequential pipeline with multiple specialized agents:
         >>> from langchain_dev_utils.pipeline import create_sequential_pipeline
         >>>
         >>> graph = create_sequential_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     graph_name="sequential_agents_pipeline",
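A sequential counterpart to the parallel sketch above, reusing the same assumed agent definitions and `AgentState` import from that sketch; here the sub-graphs run one after another in list order.

    from langchain_dev_utils.pipeline import create_sequential_pipeline

    graph = create_sequential_pipeline(
        sub_graphs=[time_agent, weather_agent, user_agent],
        state_schema=AgentState,
        graph_name="sequential_agents_pipeline",
    )
    response = graph.invoke({"messages": [HumanMessage("Hello")]})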
langchain_dev_utils/tool_calling/human_in_the_loop.py

@@ -133,7 +133,7 @@ def human_in_the_loop(
     If `func` is None, returns a decorator that will decorate the target function.
 
     Example:
-        Basic usage with default handler:
+        # Basic usage with default handler:
         >>> from langchain_dev_utils.tool_calling import human_in_the_loop
         >>> from langchain_core.tools import tool
         >>> import datetime

@@ -144,10 +144,10 @@ def human_in_the_loop(
         ...     \"\"\"Get current timestamp\"\"\"
         ...     return str(datetime.datetime.now().timestamp())
 
-        Usage with custom handler:
+        # Usage with custom handler:
         >>> def custom_handler(params: InterruptParams) -> Any:
         ...     response = interrupt(
-        ...
+        ...         # Please add your custom interrupt response content here
         ...     )
         ...     if response["type"] == "accept":
         ...         return params["tool"].invoke(params["tool_call_args"])

@@ -219,7 +219,7 @@ def human_in_the_loop_async(
     If `func` is None, returns a decorator that will decorate the target function.
 
     Example:
-        Basic usage with default handler:
+        # Basic usage with default handler:
         >>> from langchain_dev_utils.tool_calling import human_in_the_loop_async
         >>> from langchain_core.tools import tool
         >>> import asyncio

@@ -232,10 +232,10 @@ def human_in_the_loop_async(
         ...     await asyncio.sleep(1)
         ...     return str(datetime.datetime.now().timestamp())
 
-        Usage with custom handler:
+        # Usage with custom handler:
         >>> async def custom_handler(params: InterruptParams) -> Any:
         ...     response = interrupt(
-        ...
+        ...         ... # Please add your custom interrupt response content here
         ...     )
         ...     if response["type"] == "accept":
         ...         return await params["tool"].ainvoke(params["tool_call_args"])
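In both doctests the payload passed to `interrupt()` is left as a placeholder comment. A sketch of what a custom handler might send, using only the `InterruptParams` keys that appear above (`params["tool"]`, `params["tool_call_args"]`); the payload dictionary is a hypothetical shape, not a schema required by the library.

    from typing import Any

    from langgraph.types import interrupt


    def custom_handler(params) -> Any:  # params: InterruptParams
        # Hypothetical interrupt payload: surface the pending tool call to a reviewer.
        response = interrupt(
            {
                "action": params["tool"].name,
                "args": params["tool_call_args"],
                "question": "Approve this tool call?",
            }
        )
        if response["type"] == "accept":
            return params["tool"].invoke(params["tool_call_args"])
        return "Tool call rejected by reviewer."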
langchain_dev_utils/tool_calling/utils.py

@@ -16,7 +16,7 @@ def has_tool_calling(message: AIMessage) -> bool:
         bool: True if message is an AIMessage with tool calls, False otherwise
 
     Example:
-        Check for tool calls in response:
+        # Check for tool calls in response:
         >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
         >>> response = model.invoke("What time is it now?")
         >>> if has_tool_calling(response):

@@ -50,14 +50,14 @@ def parse_tool_calling(
         Union[tuple[str, dict], list[tuple[str, dict]]]: The tool call name and args
 
     Example:
-        Parse single tool call:
+        # Parse single tool call:
         >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
         >>> response = model.invoke("What time is it now?")
         >>> response
         >>> if has_tool_calling(response):
         ...     tool_name, tool_args = parse_tool_calling(response, first_tool_call_only=True)
 
-        Parse multiple tool calls:
+        # Parse multiple tool calls:
         >>> if has_tool_calling(response):
         ...     tool_calls = parse_tool_calling(response)
     """
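A small dispatch sketch building on the return shapes documented above: `parse_tool_calling` yields `(name, args)` pairs, which can be routed to tools by name. The tool registry and the `model` setup are placeholders carried over from the doctests.

    from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling

    tools_by_name = {"get_current_time": get_current_time}  # placeholder registry

    response = model.invoke("What time is it now?")
    if has_tool_calling(response):
        # Without first_tool_call_only, a list of (tool_name, tool_args) tuples is returned.
        for tool_name, tool_args in parse_tool_calling(response):
            result = tools_by_name[tool_name].invoke(tool_args)
            print(tool_name, "->", result)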