langchain-dev-utils 1.2.4__py3-none-any.whl → 1.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- __version__ = "1.2.4"
+ __version__ = "1.2.6"
@@ -0,0 +1,39 @@
+ from importlib import util
+
+ from pydantic import BaseModel
+
+
+ def _check_langchain_openai_install() -> None:
+     if not util.find_spec("langchain_openai"):
+         msg = (
+             "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
+         )
+         raise ImportError(msg)
+
+
+ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
+     """
+     Return 'base_url' if the model has a field named or aliased as 'base_url',
+     else return 'api_base' if it has a field named or aliased as 'api_base',
+     else return None.
+     The return value is always either 'base_url', 'api_base', or None.
+     """
+     model_fields = model_cls.model_fields
+
+     # try model_fields first
+     if "base_url" in model_fields:
+         return "base_url"
+
+     if "api_base" in model_fields:
+         return "api_base"
+
+     # then try aliases
+     for field_info in model_fields.values():
+         if field_info.alias == "base_url":
+             return "base_url"
+
+     for field_info in model_fields.values():
+         if field_info.alias == "api_base":
+             return "api_base"
+
+     return None
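For reference, a minimal sketch of how `_get_base_url_field_name` resolves the endpoint field (the `FakeModel` class below is hypothetical; note that declared field names are checked before aliases):

```python
from pydantic import BaseModel, Field

from langchain_dev_utils._utils import _get_base_url_field_name


class FakeModel(BaseModel):
    # Declared as `api_base` but exposed under the alias `base_url`,
    # mirroring integrations that name their endpoint field differently.
    api_base: str | None = Field(default=None, alias="base_url")


# The field-name check runs before the alias check, so "api_base" wins here.
print(_get_base_url_field_name(FakeModel))  # -> "api_base"
```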
@@ -10,6 +10,7 @@ from langchain.agents.middleware.types import (
      _OutputAgentState,
  )
  from langchain.agents.structured_output import ResponseFormat
+ from langchain_core.messages import SystemMessage
  from langchain_core.tools import BaseTool
  from langgraph.cache.base import BaseCache
  from langgraph.graph.state import CompiledStateGraph
@@ -24,7 +25,7 @@ def create_agent(  # noqa: PLR0915
      model: str,
      tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
      *,
-     system_prompt: str | None = None,
+     system_prompt: str | SystemMessage | None = None,
      middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
      response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
      state_schema: type[AgentState[ResponseT]] | None = None,
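Since `system_prompt` now also accepts a `SystemMessage`, a call might look like the following sketch (the import path is assumed from the package layout, and the model string is illustrative, assuming a matching `register_model_provider` call was made first):

```python
from langchain_core.messages import SystemMessage

from langchain_dev_utils.agents import create_agent

# Both plain strings and SystemMessage objects are now accepted
# for system_prompt.
agent = create_agent(
    model="vllm:qwen3-4b",
    system_prompt=SystemMessage(content="You are a helpful assistant."),
)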
@@ -78,24 +78,6 @@ class ModelRouterMiddleware(AgentMiddleware):
      Examples:
          ```python
          from langchain_dev_utils.agents.middleware import ModelRouterMiddleware
-
-         model_list = [
-             {
-                 "model_name": "vllm:qwen3-8b",
-                 "model_description": "Suitable for general conversation and text generation tasks"
-             },
-             {
-                 "model_name": "openrouter:qwen/qwen3-vl-32b-instruct",
-                 "model_description": "For visual tasks",
-                 "tools": []
-             },
-             {
-                 "model_name": "openrouter:qwen/qwen3-coder-plus",
-                 "model_description": "For code generation tasks",
-                 "tools": [run_python_code]
-             }
-         ]
-
          middleware = ModelRouterMiddleware(
              router_model="vllm:qwen3-4b",
              model_list=model_list
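The surviving docstring example still references `model_list`; a minimal definition consistent with the removed lines would be (values illustrative, taken from the entries deleted above):

```python
# Each entry maps a routable model to a description the router model uses.
model_list = [
    {
        "model_name": "vllm:qwen3-8b",
        "model_description": "Suitable for general conversation and text generation tasks",
    },
    {
        "model_name": "openrouter:qwen/qwen3-coder-plus",
        "model_description": "For code generation tasks",
        "tools": [],
    },
]
```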
@@ -38,7 +38,7 @@ from pydantic import (
  )
  from typing_extensions import Self
 
- from ..types import ProviderConfig, ToolChoiceType
+ from ..types import CompatibilityOptions, ToolChoiceType
 
  _BM = TypeVar("_BM", bound=BaseModel)
  _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
@@ -59,7 +59,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
      When calling with_structured_output, the default value of the method parameter is adjusted to "function_calling" (instead of the default "json_schema" in ChatOpenAI), providing better compatibility with other models.
 
      **3.Supports configuration of related parameters**
-     For cases where parameters differ from the official OpenAI API, this library provides the provider_config parameter to address this issue. For example, when different model providers have inconsistent support for tool_choice, you can adapt by setting supported_tool_choice in provider_config.
+     For cases where parameters differ from the official OpenAI API, this library provides the compatibility_options parameter to address this issue. For example, when different model providers have inconsistent support for tool_choice, you can adapt by setting supported_tool_choice in compatibility_options.
 
      Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class extends capabilities to better support diverse OpenAI-compatible model providers while maintaining full compatibility with LangChain's chat model interface.
 
@@ -82,9 +82,15 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
      _provider: str = PrivateAttr(default="openai-compatible")
 
+     """Provider Compatibility Options"""
      supported_tool_choice: ToolChoiceType = Field(default_factory=list)
+     """Supported tool choice"""
      keep_reasoning_content: bool = Field(default=False)
+     """Whether to keep reasoning content in the messages"""
      support_json_mode: bool = Field(default=False)
+     """Whether to support JSON mode"""
+     include_usage: bool = Field(default=True)
+     """Whether to include usage information in the output"""
 
      @property
      def _supported_tool_choice(self) -> ToolChoiceType:
@@ -98,6 +104,10 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
      def _support_json_mode(self) -> bool:
          return self.support_json_mode
 
+     @property
+     def _include_usage(self) -> bool:
+         return self.include_usage
+
      @property
      def _llm_type(self) -> str:
          return f"chat-{self._provider}"
@@ -290,7 +300,8 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
          run_manager: Optional[CallbackManagerForLLMRun] = None,
          **kwargs: Any,
      ) -> Iterator[ChatGenerationChunk]:
-         kwargs["stream_options"] = {"include_usage": True}
+         if self._include_usage:
+             kwargs["stream_options"] = {"include_usage": True}
          try:
              for chunk in super()._stream(
                  messages, stop=stop, run_manager=run_manager, **kwargs
@@ -311,7 +322,8 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
          run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
          **kwargs: Any,
      ) -> AsyncIterator[ChatGenerationChunk]:
-         kwargs["stream_options"] = {"include_usage": True}
+         if self._include_usage:
+             kwargs["stream_options"] = {"include_usage": True}
          try:
              async for chunk in super()._astream(
                  messages, stop=stop, run_manager=run_manager, **kwargs
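The new `include_usage` option exists because some OpenAI-compatible servers reject the `stream_options` parameter; setting it to `False` keeps `stream_options` out of streaming requests entirely. A hedged sketch of such a registration (provider name and URL are placeholders):

```python
from langchain_dev_utils.chat_models import register_model_provider

# Hypothetical provider that errors on stream_options:
# include_usage=False skips kwargs["stream_options"] in _stream/_astream.
register_model_provider(
    provider_name="myprovider",
    chat_model="openai-compatible",
    base_url="http://localhost:8000/v1",
    compatibility_options={"include_usage": False},
)
```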
@@ -458,8 +470,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
  def _create_openai_compatible_model(
      provider: str,
      base_url: str,
-     provider_config: Optional[ProviderConfig] = None,
-     chat_model_cls_name: Optional[str] = None,
+     compatibility_options: Optional[CompatibilityOptions] = None,
  ) -> Type[_BaseChatOpenAICompatible]:
      """Factory function for creating provider-specific OpenAI-compatible model classes.
 
@@ -469,16 +480,15 @@ def _create_openai_compatible_model(
      Args:
          provider: Provider identifier (e.g., `vllm`,`openrouter`)
          base_url: Default API base URL for the provider
-         provider_config: Optional configuration for the provider
-         chat_model_cls_name: Optional custom name for the chat model class
+         compatibility_options: Optional configuration for the provider
 
      Returns:
          Configured model class ready for instantiation with provider-specific settings
      """
-     chat_model_cls_name = chat_model_cls_name or f"Chat{provider.title()}"
+     chat_model_cls_name = f"Chat{provider.title()}"
+
+     compatibility_options = compatibility_options or {}
 
-     if provider_config is None:
-         provider_config = {}
      return create_model(
          chat_model_cls_name,
          __base__=_BaseChatOpenAICompatible,
@@ -504,14 +514,18 @@ def _create_openai_compatible_model(
          ),
          supported_tool_choice=(
              ToolChoiceType,
-             Field(default=provider_config.get("supported_tool_choice", ["auto"])),
+             Field(default=compatibility_options.get("supported_tool_choice", ["auto"])),
          ),
          keep_reasoning_content=(
              bool,
-             Field(default=provider_config.get("keep_reasoning_content", False)),
+             Field(default=compatibility_options.get("keep_reasoning_content", False)),
          ),
          support_json_mode=(
              bool,
-             Field(default=provider_config.get("support_json_mode", False)),
+             Field(default=compatibility_options.get("support_json_mode", False)),
+         ),
+         include_usage=(
+             bool,
+             Field(default=compatibility_options.get("include_usage", True)),
          ),
      )
@@ -3,9 +3,13 @@ from typing import Any, NotRequired, Optional, TypedDict, cast
  from langchain.chat_models.base import _SUPPORTED_PROVIDERS, _init_chat_model_helper
  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_core.utils import from_env
- from pydantic import BaseModel
 
- from .types import ChatModelType, ProviderConfig
+ from langchain_dev_utils._utils import (
+     _check_langchain_openai_install,
+     _get_base_url_field_name,
+ )
+
+ from .types import ChatModelType, CompatibilityOptions
 
  _MODEL_PROVIDERS_DICT = {}
 
@@ -14,36 +18,8 @@ class ChatModelProvider(TypedDict):
      provider_name: str
      chat_model: ChatModelType
      base_url: NotRequired[str]
-     provider_profile: NotRequired[dict[str, dict[str, Any]]]
-     provider_config: NotRequired[ProviderConfig]
-
-
- def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
-     """
-     Return 'base_url' if the model has a field named or aliased as 'base_url',
-     else return 'api_base' if it has a field named or aliased as 'api_base',
-     else return None.
-     The return value is always either 'base_url', 'api_base', or None.
-     """
-     model_fields = model_cls.model_fields
-
-     # try model_fields first
-     if "base_url" in model_fields:
-         return "base_url"
-
-     if "api_base" in model_fields:
-         return "api_base"
-
-     # then try aliases
-     for field_info in model_fields.values():
-         if field_info.alias == "base_url":
-             return "base_url"
-
-     for field_info in model_fields.values():
-         if field_info.alias == "api_base":
-             return "api_base"
-
-     return None
+     model_profiles: NotRequired[dict[str, dict[str, Any]]]
+     compatibility_options: NotRequired[CompatibilityOptions]
 
 
  def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
@@ -89,17 +65,17 @@ def _load_chat_model_helper(
          BaseChatModel: Initialized chat model instance
      """
      model, model_provider = _parse_model(model, model_provider)
-     if model_provider in _MODEL_PROVIDERS_DICT.keys():
+     if model_provider in _MODEL_PROVIDERS_DICT:
          chat_model = _MODEL_PROVIDERS_DICT[model_provider]["chat_model"]
          if base_url := _MODEL_PROVIDERS_DICT[model_provider].get("base_url"):
              url_key = _get_base_url_field_name(chat_model)
              if url_key:
                  kwargs.update({url_key: base_url})
-         if provider_profile := _MODEL_PROVIDERS_DICT[model_provider].get(
-             "provider_profile"
+         if model_profiles := _MODEL_PROVIDERS_DICT[model_provider].get(
+             "model_profiles"
          ):
-             if model in provider_profile:
-                 kwargs.update({"profile": provider_profile[model]})
+             if model in model_profiles and "profile" not in kwargs:
+                 kwargs.update({"profile": model_profiles[model]})
          return chat_model(model=model, **kwargs)
 
      return _init_chat_model_helper(model, model_provider=model_provider, **kwargs)
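The added `"profile" not in kwargs` guard means an explicitly passed profile now takes precedence over the registered `model_profiles` entry; a small sketch (provider and values illustrative, assuming a prior registration):

```python
from langchain_dev_utils.chat_models import load_chat_model

# The explicit profile kwarg wins; the registered model_profiles entry
# for "qwen3-4b" no longer overwrites it.
model = load_chat_model("vllm:qwen3-4b", profile={"max_input_tokens": 8192})
```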
@@ -109,8 +85,8 @@ def register_model_provider(
      provider_name: str,
      chat_model: ChatModelType,
      base_url: Optional[str] = None,
-     provider_profile: Optional[dict[str, dict[str, Any]]] = None,
-     provider_config: Optional[ProviderConfig] = None,
+     model_profiles: Optional[dict[str, dict[str, Any]]] = None,
+     compatibility_options: Optional[CompatibilityOptions] = None,
  ):
      """Register a new model provider.
 
@@ -119,12 +95,11 @@ def register_model_provider(
      string identifiers for supported providers.
 
      Args:
-         provider_name: Name of the provider to register
-         chat_model: Either a BaseChatModel class or a string identifier for a supported provider
-         base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
-         provider_profile: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
-         provider_config: The configuration of the model provider (Optional parameter;effective only when `chat_model` is a string and is "openai-compatible".)
-             It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
+         provider_name: The name of the model provider, used as an identifier for loading models later.
+         chat_model: The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported).
+         base_url: The API endpoint URL of the model provider (optional; applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`).
+         model_profiles: Declares the capabilities and parameters supported by each model provided by this provider (optional; applicable to both `chat_model` types). The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.).
+         compatibility_options: Compatibility options for the model provider (optional; only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation.
      Raises:
          ValueError: If base_url is not provided when chat_model is a string,
              or if chat_model string is not in supported providers
@@ -146,12 +121,9 @@ def register_model_provider(
      """
      base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
      if isinstance(chat_model, str):
-         try:
-             from .adapters.openai_compatible import _create_openai_compatible_model
-         except ImportError:
-             raise ImportError(
-                 "Please install langchain_dev_utils[standard],when chat_model is a 'openai-compatible'"
-             )
+         _check_langchain_openai_install()
+         from .adapters.openai_compatible import _create_openai_compatible_model
+
          if base_url is None:
              raise ValueError(
                  f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when chat_model is a string"
@@ -164,15 +136,14 @@ def register_model_provider(
          chat_model = _create_openai_compatible_model(
              provider_name,
              base_url,
-             provider_config=provider_config,
+             compatibility_options=compatibility_options,
          )
          _MODEL_PROVIDERS_DICT.update(
              {
                  provider_name: {
                      "chat_model": chat_model,
-                     "provider_config": provider_config,
                      "base_url": base_url,
-                     "provider_profile": provider_profile,
+                     "model_profiles": model_profiles,
                  }
              }
          )
@@ -183,7 +154,7 @@ def register_model_provider(
              provider_name: {
                  "chat_model": chat_model,
                  "base_url": base_url,
-                 "provider_profile": provider_profile,
+                 "model_profiles": model_profiles,
              }
          }
      )
@@ -192,7 +163,7 @@ def register_model_provider(
          {
              provider_name: {
                  "chat_model": chat_model,
-                 "provider_profile": provider_profile,
+                 "model_profiles": model_profiles,
              }
          }
      )
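Putting the renamed parameters together, a registration under the new names might look like this sketch (provider, URL, and profile values are illustrative):

```python
from langchain_dev_utils.chat_models import load_chat_model, register_model_provider

# model_profiles keys are model names; the matching dict is attached
# to model.profile when the model is loaded.
register_model_provider(
    provider_name="vllm",
    chat_model="openai-compatible",
    base_url="http://localhost:8000/v1",
    model_profiles={"qwen3-4b": {"max_input_tokens": 32768, "tool_calling": True}},
    compatibility_options={"support_json_mode": True},
)

model = load_chat_model("vllm:qwen3-4b")
print(model.profile)  # -> {"max_input_tokens": 32768, "tool_calling": True}
```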
@@ -208,36 +179,50 @@ def batch_register_model_provider(
 
      Args:
          providers: List of ChatModelProvider dictionaries, each containing:
-             - provider_name: Name of the provider to register
-             - chat_model: Either a BaseChatModel class or a string identifier for a supported provider
-             - base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
-             - provider_profile: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
-             - provider_config: The configuration of the model provider(Optional parameter; effective only when `chat_model` is a string and is "openai-compatible".)
-                 It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
+             - provider_name (str): The name of the model provider, used as an
+                 identifier for loading models later.
+             - chat_model (ChatModel | str): The chat model, which can be either
+                 a `ChatModel` instance or a string (currently only `"openai-compatible"`
+                 is supported).
+             - base_url (str, optional): The API endpoint URL of the model provider.
+                 Applicable to both `chat_model` types, but primarily used when `chat_model`
+                 is `"openai-compatible"`.
+             - model_profiles (dict, optional): Declares the capabilities and parameters
+                 supported by each model. The configuration will be loaded and assigned to
+                 `model.profile` (e.g., `max_input_tokens`, `tool_calling`, etc.).
+             - compatibility_options (CompatibilityOptions, optional): Compatibility
+                 options for the model provider. Only effective when `chat_model` is
+                 `"openai-compatible"`. Used to declare support for OpenAI-compatible features
+                 (e.g., `tool_choice` strategies, JSON mode, etc.).
 
      Raises:
          ValueError: If any of the providers are invalid
 
      Example:
-         Register multiple providers at once:
-         >>> from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
-         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
-         >>>
-         >>> batch_register_model_provider([
-         ...     {
-         ...         "provider_name": "fakechat",
-         ...         "chat_model": FakeChatModel,
-         ...     },
-         ...     {
-         ...         "provider_name": "vllm",
-         ...         "chat_model": "openai-compatible",
-         ...         "base_url": "http://localhost:8000/v1",
-         ...     },
-         ... ])
-         >>> model = load_chat_model(model="fakechat:fake-model")
-         >>> model.invoke("Hello")
-         >>> model = load_chat_model(model="vllm:qwen3-4b")
-         >>> model.invoke("Hello")
+         Register multiple providers at once::
+
+         >>> from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
+         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
+         >>>
+         >>> # Register multiple providers
+         >>> batch_register_model_provider([
+         ...     {
+         ...         "provider_name": "fakechat",
+         ...         "chat_model": FakeChatModel,
+         ...     },
+         ...     {
+         ...         "provider_name": "vllm",
+         ...         "chat_model": "openai-compatible",
+         ...         "base_url": "http://localhost:8000/v1",
+         ...     },
+         ... ])
+         >>>
+         >>> # Use registered providers
+         >>> model = load_chat_model("fakechat:fake-model")
+         >>> model.invoke("Hello")
+         >>>
+         >>> model = load_chat_model("vllm:qwen3-4b")
+         >>> model.invoke("Hello")
      """
 
      for provider in providers:
@@ -245,8 +230,8 @@ def batch_register_model_provider(
          provider["provider_name"],
          provider["chat_model"],
          provider.get("base_url"),
-         provider_profile=provider.get("provider_profile"),
-         provider_config=provider.get("provider_config"),
+         model_profiles=provider.get("model_profiles"),
+         compatibility_options=provider.get("compatibility_options"),
      )
 
 
@@ -9,7 +9,8 @@ ChatModelType = Union[type[BaseChatModel], Literal["openai-compatible"]]
  ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]
 
 
- class ProviderConfig(TypedDict):
+ class CompatibilityOptions(TypedDict):
      supported_tool_choice: NotRequired[ToolChoiceType]
      keep_reasoning_content: NotRequired[bool]
      support_json_mode: NotRequired[bool]
+     include_usage: NotRequired[bool]
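Because every key of `CompatibilityOptions` is `NotRequired`, any subset is a valid value; a small sketch (import path inferred from the wheel's `chat_models/types.py` module):

```python
from langchain_dev_utils.chat_models.types import CompatibilityOptions

# Any subset of keys is valid; omitted keys fall back to the defaults
# applied in _create_openai_compatible_model.
options: CompatibilityOptions = {
    "supported_tool_choice": ["auto", "required"],
    "keep_reasoning_content": True,
    "include_usage": False,
}
```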
@@ -2,7 +2,11 @@ from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
  from langchain.embeddings.base import Embeddings, _SUPPORTED_PROVIDERS, init_embeddings
  from langchain_core.utils import from_env, secret_from_env
- from pydantic import BaseModel
+
+ from langchain_dev_utils._utils import (
+     _check_langchain_openai_install,
+     _get_base_url_field_name,
+ )
 
  _EMBEDDINGS_PROVIDERS_DICT = {}
 
@@ -15,34 +19,6 @@ class EmbeddingProvider(TypedDict):
      base_url: NotRequired[str]
 
 
- def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
-     """
-     Return 'base_url' if the model has a field named or aliased as 'base_url',
-     else return 'api_base' if it has a field named or aliased as 'api_base',
-     else return None.
-     The return value is always either 'base_url', 'api_base', or None.
-     """
-     model_fields = model_cls.model_fields
-
-     # try model_fields first
-     if "base_url" in model_fields:
-         return "base_url"
-
-     if "api_base" in model_fields:
-         return "api_base"
-
-     # then try aliases
-     for field_info in model_fields.values():
-         if field_info.alias == "base_url":
-             return "base_url"
-
-     for field_info in model_fields.values():
-         if field_info.alias == "api_base":
-             return "api_base"
-
-     return None
-
-
  def _parse_model_string(model_name: str) -> tuple[str, str]:
      """Parse model string into provider and model name.
 
@@ -119,6 +95,8 @@ def register_embeddings_provider(
              "when embeddings_model is a string, the value must be 'openai-compatible'"
          )
 
+     _check_langchain_openai_install()
+
      _EMBEDDINGS_PROVIDERS_DICT.update(
          {
              provider_name: {
@@ -238,7 +216,6 @@ def load_embeddings(
      if embeddings == "openai-compatible":
          kwargs["check_embedding_ctx_length"] = False
          embeddings = "openai"
-
      return init_embeddings(
          model=model,
          provider=embeddings,
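For context, this branch is reached when an `"openai-compatible"` embeddings provider is loaded; a hedged sketch of the registration-and-load flow it supports (names and URL illustrative, import path assumed from the package layout):

```python
from langchain_dev_utils.embeddings import load_embeddings, register_embeddings_provider

# "openai-compatible" is routed through langchain-openai with
# check_embedding_ctx_length disabled, as in the hunk above.
register_embeddings_provider(
    provider_name="vllm",
    embeddings_model="openai-compatible",
    base_url="http://localhost:8000/v1",
)

embeddings = load_embeddings("vllm:qwen3-embedding-4b")
vector = embeddings.embed_query("hello")
```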
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-dev-utils
- Version: 1.2.4
+ Version: 1.2.6
  Summary: A practical utility library for LangChain and LangGraph development
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -57,19 +57,23 @@ Mainly consists of the following two functions:
  - `register_model_provider`: Register a chat model provider
  - `load_chat_model`: Load a chat model
 
- `register_model_provider` parameter description:
+ **`register_model_provider` Parameters:**
 
- - `provider_name`: Model provider name, used as an identifier for subsequent model loading
- - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
- - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
- - `provider_profile`: Model provider's model configuration file (optional, valid for both types of `chat_model`); finally, it will read the corresponding model configuration parameters based on `model_name` and set them to `model.profile`.
- - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `provider_name` | str | Yes | - | The name of the model provider, used as an identifier for loading models later. |
+ | `chat_model` | ChatModel \| str | Yes | - | The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported). |
+ | `base_url` | str | No | - | The API endpoint URL of the model provider (applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`). |
+ | `model_profiles` | dict | No | - | Declares the capabilities and parameters supported by each model provided by this provider. The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`, etc.). |
+ | `compatibility_options` | dict | No | - | Compatibility options for the model provider (only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation. |
 
- `load_chat_model` parameter description:
+ **`load_chat_model` Parameters:**
 
- - `model`: Chat model name, type str
- - `model_provider`: Chat model provider name, type str, optional
- - `kwargs`: Additional parameters passed to the chat model class, e.g., temperature, top_p, etc.
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `model` | str | Yes | - | Chat model name |
+ | `model_provider` | str | No | - | Chat model provider name |
+ | `kwargs` | dict | No | - | Additional parameters passed to the chat model class, e.g., temperature, top_p, etc. |
 
  Example for integrating a qwen3-4b model deployed using `vllm`:
 
@@ -98,17 +102,21 @@ Mainly consists of the following two functions:
  - `register_embeddings_provider`: Register an embedding model provider
  - `load_embeddings`: Load an embedding model
 
- `register_embeddings_provider` parameter description:
+ **`register_embeddings_provider` Parameters:**
 
- - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
- - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
- - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `provider_name` | str | Yes | - | Embedding model provider name, used as an identifier for subsequent model loading |
+ | `embeddings_model` | Embeddings \| str | Yes | - | Embedding model, can be Embeddings or a string (currently supports "openai-compatible") |
+ | `base_url` | str | No | - | The API address of the Embedding model provider (valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible") |
 
- `load_embeddings` parameter description:
+ **`load_embeddings` Parameters:**
 
- - `model`: Embedding model name, type str
- - `provider`: Embedding model provider name, type str, optional
- - `kwargs`: Other additional parameters
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `model` | str | Yes | - | Embedding model name |
+ | `provider` | str | No | - | Embedding model provider name |
+ | `kwargs` | dict | No | - | Other additional parameters |
 
  Example for integrating a qwen3-embedding-4b model deployed using `vllm`:
 
@@ -142,11 +150,15 @@ Includes the following features:
 
  For stream responses obtained using `stream()` and `astream()`, you can use `merge_ai_message_chunk` to merge them into a final AIMessage.
 
- `merge_ai_message_chunk` parameter description:
+ **`merge_ai_message_chunk` Parameters:**
 
- - `chunks`: List of AIMessageChunk
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `chunks` | List[AIMessageChunk] | Yes | - | List of AIMessageChunk objects |
 
  ```python
+ from langchain_dev_utils.message_convert import merge_ai_message_chunk
+
  chunks = list(model.stream("Hello"))
  merged = merge_ai_message_chunk(chunks)
  ```
@@ -155,16 +167,16 @@ merged = merge_ai_message_chunk(chunks)
 
  For a list, you can use `format_sequence` to format it.
 
- `format_sequence` parameter description:
+ **`format_sequence` Parameters:**
 
- - `inputs`: A list containing any of the following types:
-     - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
-     - langchain_core.documents.Document
-     - str
- - `separator`: String used to join the content, defaults to "-".
- - `with_num`: If True, add a numeric prefix to each item (e.g., "1. Hello"), defaults to False.
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `inputs` | List | Yes | - | A list containing any of the following types: langchain_core.messages, langchain_core.documents.Document, str |
+ | `separator` | str | No | "-" | String used to join the content |
+ | `with_num` | bool | No | False | If True, add a numeric prefix to each item (e.g., "1. Hello") |
 
  ```python
+ from langchain_dev_utils.message_convert import format_sequence
  text = format_sequence([
      "str1",
      "str2",
@@ -185,14 +197,18 @@ Includes the following features:
 
  `has_tool_calling` and `parse_tool_calling` are used to check and parse tool calls.
 
- `has_tool_calling` parameter description:
+ **`has_tool_calling` Parameters:**
 
- - `message`: AIMessage object
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `message` | AIMessage | Yes | - | AIMessage object |
 
- `parse_tool_calling` parameter description:
+ **`parse_tool_calling` Parameters:**
 
- - `message`: AIMessage object
- - `first_tool_call_only`: Whether to only check the first tool call
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `message` | AIMessage | Yes | - | AIMessage object |
+ | `first_tool_call_only` | bool | No | False | Whether to only parse the first tool call |
 
  ```python
  import datetime
@@ -221,7 +237,7 @@ if has_tool_calling(response):
  Both can accept a `handler` parameter for custom breakpoint return and response handling logic.
 
  ```python
- from langchain_dev_utils import human_in_the_loop
+ from langchain_dev_utils.tool_calling import human_in_the_loop
  from langchain_core.tools import tool
  import datetime
 
@@ -245,6 +261,13 @@ Includes the following features:
 
  In LangChain v1, the officially provided `create_agent` function can be used to create a single agent, where the model parameter supports passing a BaseChatModel instance or a specific string (when passing a string, it is limited to the models supported by `init_chat_model`). To extend the flexibility of specifying models via strings, this library provides a functionally identical `create_agent` function, allowing you to directly use models supported by `load_chat_model` (requires prior registration).
 
+ **`create_agent` Parameters:**
+
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `model` | str | Yes | - | Model name or model instance. Can be a string identifier for a model registered with `register_model_provider` or a BaseChatModel instance. |
+ | Other parameters | Various | No | - | All other parameters are the same as in `langchain.agents.create_agent` |
+
  Usage example:
 
  ```python
@@ -293,15 +316,19 @@ Includes the following features:
  Sequential graph orchestration:
  Uses `create_sequential_pipeline`, supported parameters:
 
- - `sub_graphs`: List of state graphs to combine (must be StateGraph instances)
- - `state_schema`: State Schema for the final generated graph
- - `graph_name`: Name of the final generated graph (optional)
- - `context_schema`: Context Schema for the final generated graph (optional)
- - `input_schema`: Input Schema for the final generated graph (optional)
- - `output_schema`: Output Schema for the final generated graph (optional)
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
- - `store`: LangGraph persistence Store (optional)
- - `cache`: LangGraph Cache (optional)
+ **`create_sequential_pipeline` Parameters:**
+
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
+ | `graph_name` | str | No | - | Name of the final generated graph |
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
+ | `cache` | BaseCache | No | - | LangGraph Cache |
 
  ```python
  from langchain.agents import AgentState
@@ -350,16 +377,20 @@ print(response)
  Parallel graph orchestration:
  Uses `create_parallel_pipeline`, supported parameters:
 
- - `sub_graphs`: List of state graphs to combine
- - `state_schema`: State Schema for the final generated graph
- - `branches_fn`: Parallel branch function, returns a list of Send objects to control parallel execution
- - `graph_name`: Name of the final generated graph (optional)
- - `context_schema`: Context Schema for the final generated graph (optional)
- - `input_schema`: Input Schema for the final generated graph (optional)
- - `output_schema`: Output Schema for the final generated graph (optional)
- - `checkpoint`: LangGraph persistence Checkpoint (optional)
- - `store`: LangGraph persistence Store (optional)
- - `cache`: LangGraph Cache (optional)
+ **`create_parallel_pipeline` Parameters:**
+
+ | Parameter | Type | Required | Default | Description |
+ |-----------|------|----------|---------|-------------|
+ | `sub_graphs` | List[StateGraph\|CompiledStateGraph] | Yes | - | List of state graphs to combine (must be StateGraph instances or CompiledStateGraph instances) |
+ | `state_schema` | type | Yes | - | State Schema for the final generated graph |
+ | `branches_fn` | Callable | Yes | - | Parallel branch function, returns a list of Send objects to control parallel execution |
+ | `graph_name` | str | No | - | Name of the final generated graph |
+ | `context_schema` | type | No | - | Context Schema for the final generated graph |
+ | `input_schema` | type | No | - | Input Schema for the final generated graph |
+ | `output_schema` | type | No | - | Output Schema for the final generated graph |
+ | `checkpoint` | BaseCheckpointSaver | No | - | LangGraph persistence Checkpoint |
+ | `store` | BaseStore | No | - | LangGraph persistence Store |
+ | `cache` | BaseCache | No | - | LangGraph Cache |
 
  ```python
  from langchain_dev_utils.pipeline import create_parallel_pipeline
@@ -398,4 +429,4 @@ print(response)
 
  - [GitHub Repository](https://github.com/TBice123123/langchain-dev-utils) — Browse source code, submit Pull Requests
  - [Issue Tracker](https://github.com/TBice123123/langchain-dev-utils/issues) — Report bugs or suggest improvements
- - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
+ - We welcome contributions in all forms — whether code, documentation, or usage examples. Let's build a more powerful and practical LangChain development ecosystem together!
@@ -1,24 +1,25 @@
- langchain_dev_utils/__init__.py,sha256=XBKH8E1LmDxv06U39yqMBbXZapOERFgICEDYZs_kRso,22
+ langchain_dev_utils/__init__.py,sha256=vMQK58X8_YZGKzRm0ThvPAKFtpfyejGmUnDrY9RQ13w,22
+ langchain_dev_utils/_utils.py,sha256=5bFs4cf3HvkMNkv35V8Sowu4YSXmfF5VNwmv_eHfkgQ,1151
  langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langchain_dev_utils/agents/__init__.py,sha256=e17SMQdJIQngbUCr2N1tY-yw0tD3tEnH7PSvyDmVPeQ,127
- langchain_dev_utils/agents/factory.py,sha256=h4uAkid2NJMMs4qV6RYFexew-ixfhEvpa254eDeDEcU,3912
+ langchain_dev_utils/agents/factory.py,sha256=JjdJwPTJpQwAlwQlBalbuGej5Jcpy2Fz6lH3EwEaxQo,3979
  langchain_dev_utils/agents/file_system.py,sha256=S6RUEmQI2eerW0gBQp0IP0X5ak5FwvqgIGRiycr2iyw,8468
  langchain_dev_utils/agents/plan.py,sha256=ydJuJLlNydheQvLPl2uCc3TBVv42YxGzPhKgtldIdIk,6497
  langchain_dev_utils/agents/wrap.py,sha256=4BWksU9DRz8c3ZHQiUi4GHwGhNysDLNs8pmLWV7BeAI,5165
  langchain_dev_utils/agents/middleware/__init__.py,sha256=cjrb8Rue5uukl9pKPF7CjSrHtcYsUBj3Mdvv2szlp7E,679
  langchain_dev_utils/agents/middleware/model_fallback.py,sha256=pXdraahOMukLgvjX70LwhrjIoEhLYQfNEwJMQHG2WPk,1673
- langchain_dev_utils/agents/middleware/model_router.py,sha256=bq-sQ7wmvpeRM3cjFPErLQxiWfS6l5nEVBN01NNWjAU,9077
+ langchain_dev_utils/agents/middleware/model_router.py,sha256=Qb_s_FoREp11yKHdmp_ZTRxB1whsFrj86awUNR0fpCk,8461
  langchain_dev_utils/agents/middleware/plan.py,sha256=saRXhzkC2pd7LNiNclSmGJelmisbTXhhTrbSUkSkf9g,16220
  langchain_dev_utils/agents/middleware/summarization.py,sha256=BtWPJcQBssGAT0nb1c0xsGEOsb8x5sAAE6xqujYjHhY,3027
  langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=u9rV24yUB-dyc1uUfUe74B1wOGVI3TZRwxkE1bvGm18,2025
  langchain_dev_utils/agents/middleware/tool_selection.py,sha256=ZqdyK4Yhp2u3GM6B_D6U7Srca9vy1o7s6N_LrV24-dQ,3107
  langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
- langchain_dev_utils/chat_models/base.py,sha256=emQs5biWgeqP9a8cEoovpOzn34w91-_SiTnytu-C5PM,12051
- langchain_dev_utils/chat_models/types.py,sha256=FM_RyiGRTb1dy59MovhDYM4Kj9cpybt2BFha0e2u0qA,468
+ langchain_dev_utils/chat_models/base.py,sha256=AYRcGViGJYsquqru_www3zt8-ZCkfzPCrw-dFF6HDts,11661
+ langchain_dev_utils/chat_models/types.py,sha256=M0iCGWgXmX1f1vkymH-jNGdFQlsJS5JqpmgHctUS9jw,512
  langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=wR7Uym9rQWsxhsBt8LrE30dayWcMnbsU8AXC4kOtnyI,19719
+ langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=4Q8ySa7jS2_AFo0oxLoqeY_aQyPppvV-DAMLt2rmGoE,20192
  langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
- langchain_dev_utils/embeddings/base.py,sha256=25ebUEaf7075h8NARgTHjAvwK6JddhHor5upiucJqu0,9686
+ langchain_dev_utils/embeddings/base.py,sha256=lGZWbi6G1M0OcAO_d_k1QAFJm9z9gM0L4UAZ6xFtEoQ,8973
  langchain_dev_utils/message_convert/__init__.py,sha256=xwjaQ1oJoc80xy70oQI4uW3gAmgV5JymJd5hgnA6s3g,458
  langchain_dev_utils/message_convert/content.py,sha256=ApmQ7fUUBO3Ihjm2hYSWd4GrU_CvrjbWla-MA7DAFRc,7758
  langchain_dev_utils/message_convert/format.py,sha256=fh4GyyuZBTMrHeCEwdu9fOh5n8tdli1vDF44jK1i-tI,2373
@@ -29,7 +30,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
  langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
  langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=nbaON9806pv5tpMRQUA_Ch3HJA5HBFgzZR7kQRf6PiY,9819
  langchain_dev_utils/tool_calling/utils.py,sha256=3cNv_Zx32KxdsGn8IkxjWUzxYEEwVJeJgTZTbfSg0pA,2751
- langchain_dev_utils-1.2.4.dist-info/METADATA,sha256=1bYaEqoBkx54fvr35w355lth8_OwMz4g2OS1SWnzlag,16536
- langchain_dev_utils-1.2.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- langchain_dev_utils-1.2.4.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
- langchain_dev_utils-1.2.4.dist-info/RECORD,,
+ langchain_dev_utils-1.2.6.dist-info/METADATA,sha256=PhJoxRlERmnMzRumGzLW3kwU58Cg3Tt6MiFibsfIi8U,19090
+ langchain_dev_utils-1.2.6.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ langchain_dev_utils-1.2.6.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+ langchain_dev_utils-1.2.6.dist-info/RECORD,,