langchain-dev-utils 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. langchain_dev_utils/__init__.py +1 -1
  2. langchain_dev_utils/_utils.py +9 -5
  3. langchain_dev_utils/agents/__init__.py +0 -1
  4. langchain_dev_utils/agents/factory.py +2 -10
  5. langchain_dev_utils/agents/file_system.py +1 -1
  6. langchain_dev_utils/agents/middleware/__init__.py +2 -0
  7. langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
  8. langchain_dev_utils/agents/middleware/model_router.py +37 -46
  9. langchain_dev_utils/agents/middleware/plan.py +17 -18
  10. langchain_dev_utils/agents/middleware/summarization.py +6 -4
  11. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  12. langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
  13. langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
  14. langchain_dev_utils/agents/plan.py +1 -1
  15. langchain_dev_utils/agents/wrap.py +8 -20
  16. langchain_dev_utils/chat_models/adapters/openai_compatible.py +105 -59
  17. langchain_dev_utils/chat_models/base.py +30 -15
  18. langchain_dev_utils/chat_models/types.py +6 -3
  19. langchain_dev_utils/embeddings/base.py +35 -18
  20. langchain_dev_utils/message_convert/__init__.py +0 -1
  21. langchain_dev_utils/message_convert/content.py +8 -11
  22. langchain_dev_utils/message_convert/format.py +2 -2
  23. langchain_dev_utils/pipeline/parallel.py +10 -41
  24. langchain_dev_utils/pipeline/sequential.py +6 -21
  25. langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
  26. langchain_dev_utils/tool_calling/utils.py +3 -3
  27. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/METADATA +24 -119
  28. langchain_dev_utils-1.2.8.dist-info/RECORD +37 -0
  29. langchain_dev_utils-1.2.6.dist-info/RECORD +0 -36
  30. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/WHEEL +0 -0
  31. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.8.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/chat_models/adapters/openai_compatible.py

@@ -1,4 +1,5 @@
 from __future__ import annotations
+
 from collections.abc import AsyncIterator, Iterator
 from json import JSONDecodeError
 from typing import (
@@ -13,12 +14,13 @@ from typing import (
     Union,
 )
 
+import openai
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LangSmithParams, LanguageModelInput
-from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
 from langchain_core.outputs import ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
@@ -26,7 +28,6 @@ from langchain_core.utils import from_env, secret_from_env
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from langchain_openai.chat_models._compat import _convert_from_v1_to_chat_completions
 from langchain_openai.chat_models.base import BaseChatOpenAI, _convert_message_to_dict
-import openai
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -38,33 +39,64 @@ from pydantic import (
 )
 from typing_extensions import Self
 
-from ..types import CompatibilityOptions, ToolChoiceType
+from ..types import (
+    CompatibilityOptions,
+    ReasoningContentKeepType,
+    ResponseFormatType,
+    ToolChoiceType,
+)
 
 _BM = TypeVar("_BM", bound=BaseModel)
 _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
 _DictOrPydantic = Union[dict, _BM]
 
 
+def _get_last_human_message_index(messages: list[BaseMessage]) -> int:
+    """find the index of the last HumanMessage in the messages list, return -1 if not found."""
+    return next(
+        (
+            i
+            for i in range(len(messages) - 1, -1, -1)
+            if isinstance(messages[i], HumanMessage)
+        ),
+        -1,
+    )
+
+
 class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """
     Base template class for OpenAI-compatible chat model implementations.
 
-    This class provides a foundation for integrating various LLM providers that offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot, and many others).
-    It enhances the base OpenAI functionality by:
-
-    **1.Supports output of more types of reasoning content (reasoning_content)**
-    ChatOpenAI can only output reasoning content natively supported by official OpenAI models, while OpenAICompatibleChatModel can output reasoning content from other model providers (e.g., OpenRouter).
-
-    **2.Optimizes default behavior for structured output**
-    When calling with_structured_output, the default value of the method parameter is adjusted to "function_calling" (instead of the default "json_schema" in ChatOpenAI), providing better compatibility with other models.
-
-    **3.Supports configuration of related parameters**
-    For cases where parameters differ from the official OpenAI API, this library provides the compatibility_options parameter to address this issue. For example, when different model providers have inconsistent support for tool_choice, you can adapt by setting supported_tool_choice in compatibility_options.
-
-    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class extends capabilities to better support diverse OpenAI-compatible model providers while maintaining full compatibility with LangChain's chat model interface.
-
-    Note: This is a template class and should not be exported or instantiated directly.
-    Instead, use it as a base class and provide the specific provider name through inheritance or the factory function `_create_openai_compatible_model()`.
+    This class provides a foundation for integrating various LLM providers that
+    offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
+    and many others). It enhances the base OpenAI functionality by:
+
+    **1. Supports output of more types of reasoning content (reasoning_content)**
+    ChatOpenAI can only output reasoning content natively supported by official
+    OpenAI models, while OpenAICompatibleChatModel can output reasoning content
+    from other model providers (e.g., OpenRouter, vLLM).
+
+    **2. Dynamically adapts to choose the most suitable structured-output method**
+    OpenAICompatibleChatModel adds method="auto" (default), which selects the best
+    structured-output method (function_calling or json_schema) based on the actual
+    capabilities of the model provider.
+
+    **3. Supports configuration of related parameters**
+    For cases where parameters differ from the official OpenAI API, this library
+    provides the compatibility_options parameter to address this issue. For
+    example, when different model providers have inconsistent support for
+    tool_choice, you can adapt by setting supported_tool_choice in
+    compatibility_options.
+
+    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
+    extends capabilities to better support diverse OpenAI-compatible model
+    providers while maintaining full compatibility with LangChain's chat model
+    interface.
+
+    Note: This is a template class and should not be exported or instantiated
+    directly. Instead, use it as a base class and provide the specific provider
+    name through inheritance or the factory function
+    `_create_openai_compatible_model()`.
     """
 
     model_name: str = Field(alias="model", default="openai compatible model")
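To make the docstring above concrete, here is a minimal usage sketch wired through the public register_model_provider / load_chat_model API that appears later in this diff. The provider name, base URL, and model name are illustrative placeholders, not part of the package:

# Hedged sketch: provider name, base_url, and model name are placeholders.
from langchain_dev_utils.chat_models import load_chat_model, register_model_provider

register_model_provider(
    provider_name="openrouter",
    chat_model="openai-compatible",
    base_url="https://openrouter.ai/api/v1",
    compatibility_options={
        "supported_tool_choice": ["auto", "required", "specific"],
        "supported_response_format": ["json_schema"],
        "reasoning_content_keep_type": "temp",
        "include_usage": True,
    },
)
model = load_chat_model("openrouter:some-model")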
@@ -85,29 +117,13 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """Provider Compatibility Options"""
     supported_tool_choice: ToolChoiceType = Field(default_factory=list)
     """Supported tool choice"""
-    keep_reasoning_content: bool = Field(default=False)
-    """Whether to keep reasoning content in the messages"""
-    support_json_mode: bool = Field(default=False)
-    """Whether to support JSON mode"""
+    supported_response_format: ResponseFormatType = Field(default_factory=list)
+    """Supported response format"""
+    reasoning_content_keep_type: ReasoningContentKeepType = Field(default="discard")
+    """How to keep reasoning content in the messages"""
     include_usage: bool = Field(default=True)
     """Whether to include usage information in the output"""
 
-    @property
-    def _supported_tool_choice(self) -> ToolChoiceType:
-        return self.supported_tool_choice
-
-    @property
-    def _keep_reasoning_content(self) -> bool:
-        return self.keep_reasoning_content
-
-    @property
-    def _support_json_mode(self) -> bool:
-        return self.support_json_mode
-
-    @property
-    def _include_usage(self) -> bool:
-        return self.include_usage
-
     @property
     def _llm_type(self) -> str:
         return f"chat-{self._provider}"
@@ -142,14 +158,26 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
             kwargs["stop"] = stop
 
         payload_messages = []
+        last_human_index = -1
+        if self.reasoning_content_keep_type == "temp":
+            last_human_index = _get_last_human_message_index(messages)
 
-        for m in messages:
+        for index, m in enumerate(messages):
             if isinstance(m, AIMessage):
                 msg_dict = _convert_message_to_dict(
                     _convert_from_v1_to_chat_completions(m)
                 )
-                if self._keep_reasoning_content and m.additional_kwargs.get(
-                    "reasoning_content"
+                if (
+                    self.reasoning_content_keep_type == "retain"
+                    and m.additional_kwargs.get("reasoning_content")
+                ):
+                    msg_dict["reasoning_content"] = m.additional_kwargs.get(
+                        "reasoning_content"
+                    )
+                elif (
+                    self.reasoning_content_keep_type == "temp"
+                    and index > last_human_index
+                    and m.additional_kwargs.get("reasoning_content")
                 ):
                     msg_dict["reasoning_content"] = m.additional_kwargs.get(
                         "reasoning_content"
@@ -300,7 +328,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
             kwargs["stream_options"] = {"include_usage": True}
         try:
             for chunk in super()._stream(
@@ -322,7 +350,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
             kwargs["stream_options"] = {"include_usage": True}
         try:
             async for chunk in super()._astream(
@@ -406,11 +434,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         if isinstance(tool_choice, str):
             if (
                 tool_choice in ["auto", "none", "required"]
-                and tool_choice in self._supported_tool_choice
+                and tool_choice in self.supported_tool_choice
             ):
                 support_tool_choice = True
 
-            elif "specific" in self._supported_tool_choice:
+            elif "specific" in self.supported_tool_choice:
                 if tool_choice in tool_names:
                     support_tool_choice = True
                     tool_choice = {
@@ -427,10 +455,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         schema: Optional[_DictOrPydanticClass] = None,
         *,
         method: Literal[
+            "auto",
             "function_calling",
             "json_mode",
             "json_schema",
-        ] = "function_calling",
+        ] = "auto",
         include_raw: bool = False,
         strict: Optional[bool] = None,
         **kwargs: Any,
@@ -444,7 +473,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
         Args:
             schema: Output schema (Pydantic model class or dictionary definition)
-            method: Extraction method - defaults to function_calling for compatibility
+            method: Extraction method - defaults to "auto", which chooses the best method based on the provider's supported response formats
             include_raw: Whether to include raw model response alongside parsed output
             strict: Schema enforcement strictness (provider-dependent)
             **kwargs: Additional structured output parameters
@@ -452,10 +481,23 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         Returns:
             Runnable configured for structured output extraction
         """
-        # Many providers do not support json_schema method, so fallback to function_calling
-        if method == "json_schema":
+        if method not in ["auto", "function_calling", "json_mode", "json_schema"]:
+            raise ValueError(
+                f"Unsupported method: {method}. Please choose from 'auto', 'function_calling', 'json_mode', 'json_schema'."
+            )
+        if method == "auto":
+            if "json_schema" in self.supported_response_format:
+                method = "json_schema"
+            else:
+                method = "function_calling"
+        elif (
+            method == "json_schema"
+            and "json_schema" not in self.supported_response_format
+        ):
             method = "function_calling"
-        if method == "json_mode" and not self._support_json_mode:
+        elif (
+            method == "json_mode" and "json_mode" not in self.supported_response_format
+        ):
             method = "function_calling"
 
         return super().with_structured_output(
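With the new default above, callers normally no longer pick a method by hand. A hedged usage sketch follows; the schema and model name are illustrative, and it assumes a provider registered with supported_response_format=["json_schema"]:

from pydantic import BaseModel
from langchain_dev_utils.chat_models import load_chat_model

class City(BaseModel):
    name: str
    population: int

model = load_chat_model("openrouter:some-model")  # illustrative provider/model
# method defaults to "auto": it resolves to "json_schema" when the provider
# declares support for it, and falls back to "function_calling" otherwise.
structured = model.with_structured_output(City)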
@@ -486,8 +528,8 @@ def _create_openai_compatible_model(
         Configured model class ready for instantiation with provider-specific settings
     """
    chat_model_cls_name = f"Chat{provider.title()}"
-
-    compatibility_options = compatibility_options or {}
+    if compatibility_options is None:
+        compatibility_options = {}
 
     return create_model(
         chat_model_cls_name,
@@ -516,13 +558,17 @@ def _create_openai_compatible_model(
             ToolChoiceType,
             Field(default=compatibility_options.get("supported_tool_choice", ["auto"])),
         ),
-        keep_reasoning_content=(
-            bool,
-            Field(default=compatibility_options.get("keep_reasoning_content", False)),
+        reasoning_content_keep_type=(
+            ReasoningContentKeepType,
+            Field(
+                default=compatibility_options.get(
+                    "reasoning_content_keep_type", "discard"
+                )
+            ),
         ),
-        support_json_mode=(
-            bool,
-            Field(default=compatibility_options.get("support_json_mode", False)),
+        supported_response_format=(
+            ResponseFormatType,
+            Field(default=compatibility_options.get("supported_response_format", [])),
         ),
         include_usage=(
             bool,
langchain_dev_utils/chat_models/base.py

@@ -5,7 +5,7 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.utils import from_env
 
 from langchain_dev_utils._utils import (
-    _check_langchain_openai_install,
+    _check_pkg_install,
     _get_base_url_field_name,
 )
 
@@ -95,11 +95,22 @@ def register_model_provider(
     string identifiers for supported providers.
 
     Args:
-        provider_name: The name of the model provider, used as an identifier for loading models later.
-        chat_model: The chat model, which can be either a `ChatModel` instance or a string (currently only `"openai-compatible"` is supported).
-        base_url: The API endpoint URL of the model provider (optional; applicable to both `chat_model` types, but primarily used when `chat_model` is a string with value `"openai-compatible"`).
-        model_profiles: Declares the capabilities and parameters supported by each model provided by this provider (optional; applicable to both `chat_model` types). The configuration corresponding to the `model_name` will be loaded and assigned to `model.profile` (e.g., fields such as `max_input_tokens`, `tool_calling`etc.).
-        compatibility_options: Compatibility options for the model provider (optional; only effective when `chat_model` is a string with value `"openai-compatible"`). Used to declare support for OpenAI-compatible features (e.g., `tool_choice` strategies, JSON mode, etc.) to ensure correct functional adaptation.
+        provider_name: The name of the model provider, used as an identifier for
+            loading models later.
+        chat_model: The chat model, which can be either a `ChatModel` instance or
+            a string (currently only `"openai-compatible"` is supported).
+        base_url: The API endpoint URL of the model provider (optional; applicable
+            to both `chat_model` types, but primarily used when `chat_model` is a
+            string with value `"openai-compatible"`).
+        model_profiles: Declares the capabilities and parameters supported by each
+            model provided by this provider (optional; applicable to both `chat_model`
+            types). The configuration corresponding to the `model_name` will be loaded
+            and assigned to `model.profile` (e.g., fields such as `max_input_tokens`,
+            `tool_calling`, etc.).
+        compatibility_options: Compatibility options for the model provider (optional;
+            only effective when `chat_model` is a string with value `"openai-compatible"`).
+            Used to declare support for OpenAI-compatible features (e.g., `tool_choice`
+            strategies, JSON mode, etc.) to ensure correct functional adaptation.
     Raises:
         ValueError: If base_url is not provided when chat_model is a string,
             or if chat_model string is not in supported providers
@@ -109,19 +120,23 @@
         >>> from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-        >>> # Register custom model provider
+        # Register custom model provider
         >>> register_model_provider("fakechat", FakeChatModel)
         >>> model = load_chat_model(model="fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
-        >>> # Using with OpenAI-compatible API:
-        >>> register_model_provider("vllm","openai-compatible",base_url="http://localhost:8000/v1")
+        # Using with OpenAI-compatible API:
+        >>> register_model_provider(
+        ...     provider_name="vllm",
+        ...     chat_model="openai-compatible",
+        ...     base_url="http://localhost:8000/v1",
+        ... )
        >>> model = load_chat_model(model="vllm:qwen3-4b")
         >>> model.invoke("Hello")
     """
     base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(chat_model, str):
-        _check_langchain_openai_install()
+        _check_pkg_install("langchain_openai")
         from .adapters.openai_compatible import _create_openai_compatible_model
 
         if base_url is None:
@@ -204,7 +219,7 @@ def batch_register_model_provider(
         >>> from langchain_dev_utils.chat_models import batch_register_model_provider, load_chat_model
         >>> from langchain_core.language_models.fake_chat_models import FakeChatModel
         >>>
-        >>> # Register multiple providers
+        # Register multiple providers
         >>> batch_register_model_provider([
         ...     {
         ...         "provider_name": "fakechat",
@@ -217,7 +232,7 @@
         ...     },
         ... ])
         >>>
-        >>> # Use registered providers
+        # Use registered providers
         >>> model = load_chat_model("fakechat:fake-model")
         >>> model.invoke("Hello")
         >>>
@@ -257,16 +272,16 @@ def load_chat_model(
         BaseChatModel: Initialized chat model instance
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.chat_models import load_chat_model
         >>> model = load_chat_model("vllm:qwen3-4b")
         >>> model.invoke("hello")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> model = load_chat_model("qwen3-4b", model_provider="vllm")
         >>> model.invoke("hello")
 
-        Load model with additional parameters:
+        # Load model with additional parameters:
         >>> model = load_chat_model(
         ...     "vllm:qwen3-4b",
         ...     temperature=0.7
langchain_dev_utils/chat_models/types.py

@@ -2,15 +2,18 @@ from typing import Literal, NotRequired, TypedDict, Union
 
 from langchain_core.language_models.chat_models import BaseChatModel
 
-
 ChatModelType = Union[type[BaseChatModel], Literal["openai-compatible"]]
 
 
 ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]
 
+ResponseFormatType = list[Literal["json_schema", "json_mode"]]
+
+ReasoningContentKeepType = Literal["discard", "temp", "retain"]
+
 
 class CompatibilityOptions(TypedDict):
     supported_tool_choice: NotRequired[ToolChoiceType]
-    keep_reasoning_content: NotRequired[bool]
-    support_json_mode: NotRequired[bool]
+    supported_response_format: NotRequired[ResponseFormatType]
+    reasoning_content_keep_type: NotRequired[ReasoningContentKeepType]
     include_usage: NotRequired[bool]
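Because CompatibilityOptions is a TypedDict whose keys are all NotRequired, a plain dict literal with any subset of these keys type-checks. A minimal sketch; the import path is inferred from the file list above and is not confirmed by this diff:

from langchain_dev_utils.chat_models.types import CompatibilityOptions  # assumed path

opts: CompatibilityOptions = {
    "supported_tool_choice": ["auto", "specific"],
    "supported_response_format": ["json_schema", "json_mode"],
    "reasoning_content_keep_type": "retain",
    # include_usage is NotRequired; the model class defaults it to True
}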
langchain_dev_utils/embeddings/base.py

@@ -1,10 +1,10 @@
 from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
-from langchain.embeddings.base import Embeddings, _SUPPORTED_PROVIDERS, init_embeddings
+from langchain.embeddings.base import _SUPPORTED_PROVIDERS, Embeddings, init_embeddings
 from langchain_core.utils import from_env, secret_from_env
 
 from langchain_dev_utils._utils import (
-    _check_langchain_openai_install,
+    _check_pkg_install,
     _get_base_url_field_name,
 )
 
@@ -60,14 +60,17 @@ def register_embeddings_provider(
 
     Args:
         provider_name: Name of the provider to register
-        embeddings_model: Either an Embeddings class or a string identifier for a supported provider
-        base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
+        embeddings_model: Either an Embeddings class or a string identifier
+            for a supported provider
+        base_url: The API address of the Embedding model provider (optional,
+            valid for both types of `embeddings_model`, but mainly used when
+            `embeddings_model` is a string and is "openai-compatible")
 
     Raises:
         ValueError: If base_url is not provided when embeddings_model is a string
 
     Example:
-        Register with custom model class:
+        # Register with custom model class:
         >>> from langchain_dev_utils.embeddings import register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>
@@ -75,9 +78,11 @@ def register_embeddings_provider(
         >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
         >>> embeddings.embed_query("hello world")
 
-        Register with OpenAI-compatible API:
+        # Register with OpenAI-compatible API:
         >>> register_embeddings_provider(
-        ...     "vllm", "openai-compatible", base_url="http://localhost:8000/v1"
+        ...     "vllm",
+        ...     "openai-compatible",
+        ...     base_url="http://localhost:8000/v1"
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
@@ -95,7 +100,7 @@ def register_embeddings_provider(
             "when embeddings_model is a string, the value must be 'openai-compatible'"
         )
 
-    _check_langchain_openai_install()
+    _check_pkg_install("langchain_openai")
 
     _EMBEDDINGS_PROVIDERS_DICT.update(
         {
@@ -126,32 +131,44 @@ def batch_register_embeddings_provider(
 ):
     """Batch register embeddings providers.
 
-    This function allows you to register multiple embeddings providers at once, which is
-    useful when setting up applications that need to work with multiple embedding services.
+    This function allows you to register multiple embeddings providers at once,
+    which is useful when setting up applications that need to work with multiple
+    embedding services.
 
     Args:
         providers: List of EmbeddingProvider dictionaries, each containing:
             - provider_name: str - Provider name
-            - embeddings_model: Union[Type[Embeddings], str] - Model class or provider string
-            - base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
+            - embeddings_model: Union[Type[Embeddings], str] - Model class or
+              provider string
+            - base_url: The API address of the Embedding model provider
+              (optional, valid for both types of `embeddings_model`, but
+              mainly used when `embeddings_model` is a string and is
+              "openai-compatible")
 
     Raises:
         ValueError: If any of the providers are invalid
 
     Example:
-        Register multiple providers at once:
+        # Register multiple providers at once:
         >>> from langchain_dev_utils.embeddings import batch_register_embeddings_provider, load_embeddings
         >>> from langchain_core.embeddings.fake import FakeEmbeddings
         >>>
         >>> batch_register_embeddings_provider(
         ...     [
-        ...         {"provider_name": "fakeembeddings", "embeddings_model": FakeEmbeddings},
-        ...         {"provider_name": "vllm", "embeddings_model": "openai-compatible", "base_url": "http://localhost:8000/v1"},
+        ...         {
+        ...             "provider_name": "fakeembeddings",
+        ...             "embeddings_model": FakeEmbeddings,
+        ...         },
+        ...         {
+        ...             "provider_name": "vllm",
+        ...             "embeddings_model": "openai-compatible",
+        ...             "base_url": "http://localhost:8000/v1"
+        ...         },
         ...     ]
         ... )
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
-        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings",size=1024)
+        >>> embeddings = load_embeddings("fakeembeddings:fake-embeddings", size=1024)
         >>> embeddings.embed_query("hello world")
     """
     for provider in providers:
@@ -185,12 +202,12 @@ def load_embeddings(
         ValueError: If provider is not registered or API key is not found
 
     Example:
-        Load model with provider prefix:
+        # Load model with provider prefix:
         >>> from langchain_dev_utils.embeddings import load_embeddings
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
 
-        Load model with separate provider parameter:
+        # Load model with separate provider parameter:
         >>> embeddings = load_embeddings("qwen3-embedding-4b", provider="vllm")
         >>> embeddings.embed_query("hello world")
     """
langchain_dev_utils/message_convert/__init__.py

@@ -6,7 +6,6 @@ from .content import (
 )
 from .format import format_sequence
 
-
 __all__ = [
     "convert_reasoning_content_for_ai_message",
     "convert_reasoning_content_for_chunk_iterator",
langchain_dev_utils/message_convert/content.py

@@ -36,13 +36,13 @@ def convert_reasoning_content_for_ai_message(
         AIMessage: Modified AI message with reasoning content in visible content
 
     Example:
-        Basic usage with default tags:
+        # Basic usage with default tags:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_ai_message
         >>> response = model.invoke("Explain quantum computing")
         >>> response = convert_reasoning_content_for_ai_message(response)
         >>> response.content
 
-        Custom tags for reasoning content:
+        # Custom tags for reasoning content:
         >>> response = convert_reasoning_content_for_ai_message(
         ...     response, think_tag=('<reasoning>', '</reasoning>')
         ... )
@@ -77,14 +77,14 @@ def convert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process streaming response:
+        # Process streaming response:
         >>> from langchain_dev_utils.message_convert import convert_reasoning_content_for_chunk_iterator
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for streaming:
+        # Custom tags for streaming:
         >>> for chunk in convert_reasoning_content_for_chunk_iterator(
         ...     model.stream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')
@@ -127,14 +127,14 @@ async def aconvert_reasoning_content_for_chunk_iterator(
         BaseMessageChunk: Modified message chunks with reasoning content
 
     Example:
-        Process async streaming response:
+        # Process async streaming response:
         >>> from langchain_dev_utils.message_convert import aconvert_reasoning_content_for_chunk_iterator
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("What is the capital of France?")
         ... ):
         ...     print(chunk.content, end="", flush=True)
 
-        Custom tags for async streaming:
+        # Custom tags for async streaming:
         >>> async for chunk in aconvert_reasoning_content_for_chunk_iterator(
         ...     model.astream("Explain quantum computing"),
         ...     think_tag=('<reasoning>', '</reasoning>')
@@ -172,12 +172,9 @@ def merge_ai_message_chunk(chunks: Sequence[AIMessageChunk]) -> AIMessage:
         AIMessage: Merged AIMessage
 
     Example:
-        Merge streaming chunks:
+        # Merge streaming chunks:
         >>> from langchain_dev_utils.message_convert import merge_ai_message_chunk
-        >>> chunks = []
-        >>> for chunk in model.stream("What is the capital of France?"):
-        ...     chunks.append(chunk)
-        >>> merged_message = merge_ai_message_chunk(chunks)
+        >>> merged_message = merge_ai_message_chunk(list(model.stream("What is the capital of France?")))
         >>> merged_message.content
     """
     ai_message_chunk = cast(AIMessageChunk, reduce(lambda x, y: x + y, chunks))
langchain_dev_utils/message_convert/format.py

@@ -34,7 +34,7 @@ def format_sequence(
         A formatted string composed of the input contents, joined by `separator`.
 
     Example:
-        Format messages with default separator:
+        # Format messages with default separator:
         >>> from langchain_dev_utils.message_convert import format_sequence
         >>> from langchain_core.messages import HumanMessage, AIMessage
         >>> messages = [
@@ -44,7 +44,7 @@ def format_sequence(
         >>> formatted = format_sequence(messages)
         >>> formatted
 
-        Format with custom separator and numbering:
+        # Format with custom separator and numbering:
         >>> formatted = format_sequence(messages, separator="---", with_num=True)
         >>> formatted
     """