langchain-dev-utils 1.2.5__py3-none-any.whl → 1.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/_utils.py +42 -0
- langchain_dev_utils/agents/__init__.py +0 -1
- langchain_dev_utils/agents/factory.py +2 -10
- langchain_dev_utils/agents/file_system.py +1 -1
- langchain_dev_utils/agents/middleware/__init__.py +2 -0
- langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
- langchain_dev_utils/agents/middleware/model_router.py +37 -46
- langchain_dev_utils/agents/middleware/plan.py +17 -18
- langchain_dev_utils/agents/middleware/summarization.py +6 -4
- langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
- langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
- langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
- langchain_dev_utils/agents/plan.py +1 -1
- langchain_dev_utils/agents/wrap.py +8 -20
- langchain_dev_utils/chat_models/adapters/openai_compatible.py +33 -17
- langchain_dev_utils/chat_models/base.py +38 -50
- langchain_dev_utils/chat_models/types.py +0 -1
- langchain_dev_utils/embeddings/base.py +40 -46
- langchain_dev_utils/message_convert/__init__.py +0 -1
- langchain_dev_utils/message_convert/content.py +8 -11
- langchain_dev_utils/message_convert/format.py +2 -2
- langchain_dev_utils/pipeline/parallel.py +10 -41
- langchain_dev_utils/pipeline/sequential.py +6 -21
- langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
- langchain_dev_utils/tool_calling/utils.py +3 -3
- {langchain_dev_utils-1.2.5.dist-info → langchain_dev_utils-1.2.7.dist-info}/METADATA +28 -120
- langchain_dev_utils-1.2.7.dist-info/RECORD +37 -0
- langchain_dev_utils-1.2.5.dist-info/RECORD +0 -35
- {langchain_dev_utils-1.2.5.dist-info → langchain_dev_utils-1.2.7.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.2.5.dist-info → langchain_dev_utils-1.2.7.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/chat_models/base.py

@@ -3,7 +3,11 @@ from typing import Any, NotRequired, Optional, TypedDict, cast
 from langchain.chat_models.base import _SUPPORTED_PROVIDERS, _init_chat_model_helper
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.utils import from_env
-
+
+from langchain_dev_utils._utils import (
+    _check_pkg_install,
+    _get_base_url_field_name,
+)
 
 from .types import ChatModelType, CompatibilityOptions
 
@@ -18,34 +22,6 @@ class ChatModelProvider(TypedDict):
     compatibility_options: NotRequired[CompatibilityOptions]
 
 
-def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
-    """
-    Return 'base_url' if the model has a field named or aliased as 'base_url',
-    else return 'api_base' if it has a field named or aliased as 'api_base',
-    else return None.
-    The return value is always either 'base_url', 'api_base', or None.
-    """
-    model_fields = model_cls.model_fields
-
-    # try model_fields first
-    if "base_url" in model_fields:
-        return "base_url"
-
-    if "api_base" in model_fields:
-        return "api_base"
-
-    # then try aliases
-    for field_info in model_fields.values():
-        if field_info.alias == "base_url":
-            return "base_url"
-
-    for field_info in model_fields.values():
-        if field_info.alias == "api_base":
-            return "api_base"
-
-    return None
-
-
 def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
     """Parse model string and provider.
 
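Note on this removal: the same `_get_base_url_field_name` helper is deleted from both `chat_models/base.py` and `embeddings/base.py` in this release and, per the new imports above and the new `langchain_dev_utils/_utils.py` (+42 lines in the file list), is now shared from a single module. A minimal sketch of the resolution order the removed code implements, using hypothetical Pydantic models that are not part of the package:

    from pydantic import BaseModel, Field

    class ProviderA(BaseModel):
        base_url: str | None = None  # field named "base_url"

    class ProviderB(BaseModel):
        endpoint: str | None = Field(default=None, alias="api_base")  # aliased as "api_base"

    # Field names are checked before aliases, and "base_url" before "api_base":
    # _get_base_url_field_name(ProviderA)  -> "base_url"
    # _get_base_url_field_name(ProviderB)  -> "api_base"
    # _get_base_url_field_name(BaseModel)  -> None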
@@ -89,7 +65,7 @@ def _load_chat_model_helper(
         BaseChatModel: Initialized chat model instance
     """
     model, model_provider = _parse_model(model, model_provider)
-    if model_provider in _MODEL_PROVIDERS_DICT
+    if model_provider in _MODEL_PROVIDERS_DICT:
         chat_model = _MODEL_PROVIDERS_DICT[model_provider]["chat_model"]
         if base_url := _MODEL_PROVIDERS_DICT[model_provider].get("base_url"):
             url_key = _get_base_url_field_name(chat_model)
@@ -98,7 +74,7 @@ def _load_chat_model_helper(
         if model_profiles := _MODEL_PROVIDERS_DICT[model_provider].get(
             "model_profiles"
         ):
-            if model in model_profiles:
+            if model in model_profiles and "profile" not in kwargs:
                 kwargs.update({"profile": model_profiles[model]})
         return chat_model(model=model, **kwargs)
 
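The new `"profile" not in kwargs` guard makes an explicitly passed profile win over the provider-registered `model_profiles` entry. A hedged usage sketch (the profile fields are illustrative, echoing the docstring later in this diff, not a verified schema):

    from langchain_dev_utils.chat_models import load_chat_model

    # In 1.2.7 the explicit profile below is kept as-is; previously the
    # registered model_profiles entry for the model would overwrite it.
    model = load_chat_model(
        "vllm:qwen3-4b",
        profile={"max_input_tokens": 32768, "tool_calling": True},  # illustrative fields
    )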
@@ -119,11 +95,22 @@ def register_model_provider(
         string identifiers for supported providers.
 
     Args:
-        provider_name: The name of the model provider, used as an identifier for
-
-
-
-
+        provider_name: The name of the model provider, used as an identifier for
+            loading models later.
+        chat_model: The chat model, which can be either a `ChatModel` instance or
+            a string (currently only `"openai-compatible"` is supported).
+        base_url: The API endpoint URL of the model provider (optional; applicable
+            to both `chat_model` types, but primarily used when `chat_model` is a
+            string with value `"openai-compatible"`).
+        model_profiles: Declares the capabilities and parameters supported by each
+            model provided by this provider (optional; applicable to both `chat_model`
+            types). The configuration corresponding to the `model_name` will be loaded
+            and assigned to `model.profile` (e.g., fields such as `max_input_tokens`,
+            `tool_calling`etc.).
+        compatibility_options: Compatibility options for the model provider (optional;
+            only effective when `chat_model` is a string with value `"openai-compatible"`).
+            Used to declare support for OpenAI-compatible features (e.g., `tool_choice`
+            strategies, JSON mode, etc.) to ensure correct functional adaptation.
     Raises:
         ValueError: If base_url is not provided when chat_model is a string,
             or if chat_model string is not in supported providers
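As the function body later in this diff shows (`from_env(f"{provider_name.upper()}_API_BASE", default=None)`), `base_url` also falls back to a provider-named environment variable. A short sketch of relying on that fallback (provider name and URL are illustrative):

    import os

    from langchain_dev_utils.chat_models import register_model_provider, load_chat_model

    # Equivalent to passing base_url="http://localhost:8000/v1" explicitly;
    # the variable must be set before register_model_provider runs.
    os.environ["VLLM_API_BASE"] = "http://localhost:8000/v1"

    register_model_provider(provider_name="vllm", chat_model="openai-compatible")
    model = load_chat_model("vllm:qwen3-4b")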
@@ -133,24 +120,25 @@ def register_model_provider(
         >>> from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-
+        # Register custom model provider
         >>> register_model_provider("fakechat", FakeChatModel)
         >>> model = load_chat_model(model="fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
-
-        >>> register_model_provider(
+        # Using with OpenAI-compatible API:
+        >>> register_model_provider(
+        ...     provider_name="vllm",
+        ...     chat_model="openai-compatible",
+        ...     base_url="http://localhost:8000/v1",
+        ... )
         >>> model = load_chat_model(model="vllm:qwen3-4b")
         >>> model.invoke("Hello")
     """
     base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(chat_model, str):
-
-
-
-        raise ImportError(
-            "Please install langchain_dev_utils[standard],when chat_model is a 'openai-compatible'"
-        )
+        _check_pkg_install("langchain_openai")
+        from .adapters.openai_compatible import _create_openai_compatible_model
+
         if base_url is None:
             raise ValueError(
                 f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when chat_model is a string"
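The hard-coded `ImportError` is replaced by the shared `_check_pkg_install` helper from `_utils.py`. Its implementation is not shown in this diff; the sketch below is only a plausible reading of what such a check might do, based on the helper's name and the old error message:

    import importlib.util

    def _check_pkg_install(pkg: str) -> None:
        # Fail fast with an actionable message when an optional dependency is missing.
        if importlib.util.find_spec(pkg) is None:
            raise ImportError(
                f"'{pkg}' is required here; install it with "
                "pip install langchain-dev-utils[standard]"
            )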
@@ -231,7 +219,7 @@ def batch_register_model_provider(
         >>> from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-
+        # Register multiple providers
         >>> batch_register_model_provider([
         ...     {
         ...         "provider_name": "fakechat",
@@ -244,7 +232,7 @@ def batch_register_model_provider(
         ...     },
         ... ])
         >>>
-
+        # Use registered providers
         >>> model = load_chat_model("fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
@@ -284,16 +272,16 @@ def load_chat_model(
         BaseChatModel: Initialized chat model instance
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.chat_models import load_chat_model
         >>> model = load_chat_model("vllm:qwen3-4b")
         >>> model.invoke("hello")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> model = load_chat_model("qwen3-4b", model_provider="vllm")
         >>> model.invoke("hello")
 
-        Load model with additional parameters:
+        # Load model with additional parameters:
         >>> model = load_chat_model(
         ...     "vllm:qwen3-4b",
         ...     temperature=0.7
langchain_dev_utils/embeddings/base.py

@@ -1,8 +1,12 @@
 from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
-from langchain.embeddings.base import
+from langchain.embeddings.base import _SUPPORTED_PROVIDERS, Embeddings, init_embeddings
 from langchain_core.utils import from_env, secret_from_env
-
+
+from langchain_dev_utils._utils import (
+    _check_pkg_install,
+    _get_base_url_field_name,
+)
 
 _EMBEDDINGS_PROVIDERS_DICT = {}
 
@@ -15,34 +19,6 @@ class EmbeddingProvider(TypedDict):
     base_url: NotRequired[str]
 
 
-def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
-    """
-    Return 'base_url' if the model has a field named or aliased as 'base_url',
-    else return 'api_base' if it has a field named or aliased as 'api_base',
-    else return None.
-    The return value is always either 'base_url', 'api_base', or None.
-    """
-    model_fields = model_cls.model_fields
-
-    # try model_fields first
-    if "base_url" in model_fields:
-        return "base_url"
-
-    if "api_base" in model_fields:
-        return "api_base"
-
-    # then try aliases
-    for field_info in model_fields.values():
-        if field_info.alias == "base_url":
-            return "base_url"
-
-    for field_info in model_fields.values():
-        if field_info.alias == "api_base":
-            return "api_base"
-
-    return None
-
-
 def _parse_model_string(model_name: str) -> tuple[str, str]:
     """Parse model string into provider and model name.
 
@@ -84,14 +60,17 @@ def register_embeddings_provider(
 
     Args:
         provider_name: Name of the provider to register
-        embeddings_model: Either an Embeddings class or a string identifier
-
+        embeddings_model: Either an Embeddings class or a string identifier
+            for a supported provider
+        base_url: The API address of the Embedding model provider (optional,
+            valid for both types of `embeddings_model`, but mainly used when
+            `embeddings_model` is a string and is "openai-compatible")
 
     Raises:
         ValueError: If base_url is not provided when embeddings_model is a string
 
     Example:
-        Register with custom model class:
+        # Register with custom model class:
         >>> from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>
@@ -99,9 +78,11 @@ def register_embeddings_provider(
         >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
         >>> embeddings.embed_query("hello world")
 
-        Register with OpenAI-compatible API:
+        # Register with OpenAI-compatible API:
         >>> register_embeddings_provider(
-        ...     "vllm",
+        ...     "vllm",
+        ...     "openai-compatible",
+        ...     base_url="http://localhost:8000/v1"
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
@@ -119,6 +100,8 @@ def register_embeddings_provider(
                 "when embeddings_model is a string, the value must be 'openai-compatible'"
             )
 
+        _check_pkg_install("langchain_openai")
+
     _EMBEDDINGS_PROVIDERS_DICT.update(
         {
             provider_name: {
@@ -148,32 +131,44 @@ def batch_register_embeddings_provider(
 ):
     """Batch register embeddings providers.
 
-    This function allows you to register multiple embeddings providers at once,
-    useful when setting up applications that need to work with multiple
+    This function allows you to register multiple embeddings providers at once,
+    which is useful when setting up applications that need to work with multiple
+    embedding services.
 
     Args:
         providers: List of EmbeddingProvider dictionaries, each containing:
            - provider_name: str - Provider name
-           - embeddings_model: Union[Type[Embeddings], str] - Model class or
-
+           - embeddings_model: Union[Type[Embeddings], str] - Model class or
+             provider string
+           - base_url: The API address of the Embedding model provider
+             (optional, valid for both types of `embeddings_model`, but
+             mainly used when `embeddings_model` is a string and is
+             "openai-compatible")
 
     Raises:
         ValueError: If any of the providers are invalid
 
     Example:
-        Register multiple providers at once:
+        # Register multiple providers at once:
         >>> from langchain_dev_utils.embeddings import batch_register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>
         >>> batch_register_embeddings_provider(
         ...     [
-        ...         {
-        ...
+        ...         {
+        ...             "provider_name": "fakeembeddings",
+        ...             "embeddings_model": FakeEmbeddings,
+        ...         },
+        ...         {
+        ...             "provider_name": "vllm",
+        ...             "embeddings_model": "openai-compatible",
+        ...             "base_url": "http://localhost:8000/v1"
+        ...         },
         ...     ]
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
-        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
+        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings", size=1024)
         >>> embeddings.embed_query("hello world")
     """
     for provider in providers:
@@ -207,12 +202,12 @@ def load_embeddings(
         ValueError: If provider is not registered or API key is not found
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.embeddings import load_embeddings
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> embeddings = load_embeddings("qwen3-embedding-4b", provider="vllm")
         >>> embeddings.embed_query("hello world")
     """
@@ -238,7 +233,6 @@ def load_embeddings(
     if embeddings == "openai-compatible":
         kwargs["check_embedding_ctx_length"] = False
         embeddings = "openai"
-
     return init_embeddings(
         model=model,
         provider=embeddings,
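For reference, the branch above means an "openai-compatible" registration is ultimately served by LangChain's `openai` embeddings provider with context-length checking disabled. A roughly equivalent direct call (a sketch only; the base_url and api_key wiring that `load_embeddings` performs is omitted, and the model name is illustrative):

    from langchain.embeddings.base import init_embeddings

    embeddings = init_embeddings(
        model="qwen3-embedding-4b",
        provider="openai",
        check_embedding_ctx_length=False,  # skip OpenAI-specific token counting of inputs
    )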
langchain_dev_utils/message_convert/content.py

@@ -36,13 +36,13 @@ def convert_reasoning_content_for_ai_message(
         AIMessage: Modified AI message with reasoning content in visible content
 
     Example:
-        Basic usage with default tags:
+        # Basic usage with default tags:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_ai_message
         >>> response = model.invoke("Explain quantum computing")
         >>> response = convert_reasoning_content_for_ai_message(response)
         >>> response.content
 
-        Custom tags for reasoning content:
+        # Custom tags for reasoning content:
         >>> response = convert_reasoning_content_for_ai_message(
         ...     response, think_tag=('<reasoning>', '</reasoning>')
         ... )
@@ -77,14 +77,14 @@ def convert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process streaming response:
+        # Process streaming response:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_chunk_iterator
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for streaming:
+        # Custom tags for streaming:
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')
@@ -127,14 +127,14 @@ async def aconvert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process async streaming response:
+        # Process async streaming response:
         >>> from langchain_dev_utils.message_convert import aconvert_reasoning_content_for_chunk_iterator
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for async streaming:
+        # Custom tags for async streaming:
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')
@@ -172,12 +172,9 @@ def merge_ai_message_chunk(chunks: Sequence[AIMessageChunk]) -> AIMessage:
         AIMessage: Merged AIMessage
 
     Example:
-        Merge streaming chunks:
+        # Merge streaming chunks:
         >>> from langchain_dev_utils.message_convert import merge_ai_message_chunk
-        >>>
-        >>> for chunk in model.stream("What is the capital of France?"):
-        ...     chunks.append(chunk)
-        >>> merged_message = merge_ai_message_chunk(chunks)
+        >>> merged_message = merge_ai_message_chunk(list(model.stream("What is the capital of France?")))
         >>> merged_message.content
     """
     ai_message_chunk = cast(AIMessageChunk, reduce(lambda x, y: x + y, chunks))
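The implementation line above merges chunks with the `+` operator defined on `AIMessageChunk`; a minimal illustration with hand-built chunks (contents are illustrative):

    from functools import reduce

    from langchain_core.messages import AIMessageChunk

    chunks = [AIMessageChunk(content="Par"), AIMessageChunk(content="is")]
    merged = reduce(lambda x, y: x + y, chunks)  # chunk addition concatenates content
    assert merged.content == "Paris"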
langchain_dev_utils/message_convert/format.py

@@ -34,7 +34,7 @@ def format_sequence(
         A formatted string composed of the input contents, joined by `separator`.
 
     Example:
-        Format messages with default separator:
+        # Format messages with default separator:
         >>> from langchain_dev_utils.message_convert import format_sequence
         >>> from langchain_core.messages import HumanMessage, AIMessage
         >>> messages = [
@@ -44,7 +44,7 @@ def format_sequence(
         >>> formatted = format_sequence(messages)
         >>> formatted
 
-        Format with custom separator and numbering:
+        # Format with custom separator and numbering:
         >>> formatted = format_sequence(messages, separator="---", with_num=True)
         >>> formatted
     """
langchain_dev_utils/pipeline/parallel.py

@@ -38,41 +38,27 @@ def create_parallel_pipeline(
         sub_graphs: List of sub-graphs to execute in parallel
         state_schema: state schema of the final constructed graph
         graph_name: Name of the final constructed graph
-        branches_fn: Optional function to determine which sub-graphs to execute
+        branches_fn: Optional function to determine which sub-graphs to execute
+            in parallel
         context_schema: context schema of the final constructed graph
         input_schema: input schema of the final constructed graph
         output_schema: output schema of the final constructed graph
-        checkpointer: Optional LangGraph checkpointer for the final constructed
+        checkpointer: Optional LangGraph checkpointer for the final constructed
+            graph
         store: Optional LangGraph store for the final constructed graph
         cache: Optional LangGraph cache for the final constructed graph
 
     Returns:
-        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+            graph of the pipeline.
 
     Example:
-        Basic parallel pipeline
+        # Basic parallel pipeline: multiple specialized agents run concurrently
         >>> from langchain_dev_utils.pipeline import create_parallel_pipeline
         >>>
         >>> graph = create_parallel_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     graph_name="parallel_agents_pipeline",
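The removed doctest lines above show how the `time_agent`, `weather_agent`, and `user_agent` referenced in the new, shorter example were previously built inline. A condensed sketch of that setup (the `create_agent` import path and the tool functions are assumptions taken from the removed text, not verified against the package):

    from langchain.agents import create_agent  # import path assumed

    time_agent = create_agent(
        model="vllm:qwen3-4b",
        tools=[get_current_time],  # tool defined elsewhere
        system_prompt="You are a time query assistant. ...",
        name="time_agent",
    )
    # weather_agent and user_agent follow the same pattern with their own tool and prompt.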
@@ -80,27 +66,10 @@ def create_parallel_pipeline(
         >>>
         >>> response = graph.invoke({"messages": [HumanMessage("Hello")]})
 
-
+        # Dynamic parallel pipeline: decide which agents to run based on conditional branches
         >>> graph = create_parallel_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     branches_fn=lambda state: [
langchain_dev_utils/pipeline/sequential.py

@@ -35,37 +35,22 @@ def create_sequential_pipeline(
         context_schema: context schema of the final constructed graph
         input_schema: input schema of the final constructed graph
         output_schema: output schema of the final constructed graph
-        checkpointer: Optional LangGraph checkpointer for the final constructed
+        checkpointer: Optional LangGraph checkpointer for the final constructed
+            graph
         store: Optional LangGraph store for the final constructed graph
         cache: Optional LangGraph cache for the final constructed graph
 
     Returns:
-        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
+            graph of the pipeline.
 
     Example:
-        Basic sequential pipeline with multiple specialized agents:
+        # Basic sequential pipeline with multiple specialized agents:
         >>> from langchain_dev_utils.pipeline import create_sequential_pipeline
         >>>
         >>> graph = create_sequential_pipeline(
         ...     sub_graphs=[
-        ...
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_time],
-        ...             system_prompt="You are a time query assistant. You can only answer questions about current time. If the question is unrelated to time, please directly respond with 'I cannot answer that'.",
-        ...             name="time_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_weather],
-        ...             system_prompt="You are a weather query assistant. You can only answer questions about current weather. If the question is unrelated to weather, please directly respond with 'I cannot answer that'.",
-        ...             name="weather_agent",
-        ...         ),
-        ...         create_agent(
-        ...             model="vllm:qwen3-4b",
-        ...             tools=[get_current_user],
-        ...             system_prompt="You are a user query assistant. You can only answer questions about current user. If the question is unrelated to user information, please directly respond with 'I cannot answer that'.",
-        ...             name="user_agent",
-        ...         ),
+        ...         time_agent, weather_agent, user_agent
         ...     ],
         ...     state_schema=AgentState,
         ...     graph_name="sequential_agents_pipeline",
langchain_dev_utils/tool_calling/human_in_the_loop.py

@@ -133,7 +133,7 @@ def human_in_the_loop(
     If `func` is None, returns a decorator that will decorate the target function.
 
     Example:
-        Basic usage with default handler:
+        # Basic usage with default handler:
         >>> from langchain_dev_utils.tool_calling import human_in_the_loop
         >>> from langchain_core.tools import tool
         >>> import datetime
@@ -144,10 +144,10 @@ def human_in_the_loop(
         ...     \"\"\"Get current timestamp\"\"\"
         ...     return str(datetime.datetime.now().timestamp())
 
-        Usage with custom handler:
+        # Usage with custom handler:
         >>> def custom_handler(params: InterruptParams) -> Any:
         ...     response = interrupt(
-        ...
+        ...         # Please add your custom interrupt response content here
         ...     )
         ...     if response["type"] == "accept":
         ...         return params["tool"].invoke(params["tool_call_args"])
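The doctest above leaves the `interrupt(...)` payload as a comment placeholder. A hedged sketch of a complete handler (the payload dict is illustrative; only params["tool"], params["tool_call_args"], and the "accept" check come from the doctest):

    from typing import Any

    from langgraph.types import interrupt

    def custom_handler(params) -> Any:
        response = interrupt(
            {"question": "Approve this tool call?", "args": params["tool_call_args"]}  # illustrative payload
        )
        if response["type"] == "accept":
            return params["tool"].invoke(params["tool_call_args"])
        return "Tool call was rejected by the reviewer."  # fallback behavior assumed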
@@ -219,7 +219,7 @@ def human_in_the_loop_async(
     If `func` is None, returns a decorator that will decorate the target function.
 
     Example:
-        Basic usage with default handler:
+        # Basic usage with default handler:
         >>> from langchain_dev_utils.tool_calling import human_in_the_loop_async
         >>> from langchain_core.tools import tool
         >>> import asyncio
@@ -232,10 +232,10 @@ def human_in_the_loop_async(
         ...     await asyncio.sleep(1)
         ...     return str(datetime.datetime.now().timestamp())
 
-        Usage with custom handler:
+        # Usage with custom handler:
         >>> async def custom_handler(params: InterruptParams) -> Any:
         ...     response = interrupt(
-        ...
+        ...         ... # Please add your custom interrupt response content here
         ...     )
         ...     if response["type"] == "accept":
         ...         return await params["tool"].ainvoke(params["tool_call_args"])
langchain_dev_utils/tool_calling/utils.py

@@ -16,7 +16,7 @@ def has_tool_calling(message: AIMessage) -> bool:
         bool: True if message is an AIMessage with tool calls, False otherwise
 
     Example:
-        Check for tool calls in response:
+        # Check for tool calls in response:
         >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
         >>> response = model.invoke("What time is it now?")
         >>> if has_tool_calling(response):
@@ -50,14 +50,14 @@ def parse_tool_calling(
         Union[tuple[str, dict], list[tuple[str, dict]]]: The tool call name and args
 
     Example:
-        Parse single tool call:
+        # Parse single tool call:
         >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
         >>> response = model.invoke("What time is it now?")
         >>> response
         >>> if has_tool_calling(response):
         ...     tool_name, tool_args = parse_tool_calling(response, first_tool_call_only=True)
 
-        Parse multiple tool calls:
+        # Parse multiple tool calls:
         >>> if has_tool_calling(response):
         ...     tool_calls = parse_tool_calling(response)
     """