langchain-dev-utils 1.2.7__tar.gz → 1.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/PKG-INFO +1 -1
  2. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/pyproject.toml +1 -1
  3. langchain_dev_utils-1.2.8/src/langchain_dev_utils/__init__.py +1 -0
  4. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/_utils.py +4 -3
  5. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +92 -62
  6. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/types.py +6 -2
  7. langchain_dev_utils-1.2.7/src/langchain_dev_utils/__init__.py +0 -1
  8. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/.gitignore +0 -0
  9. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/.python-version +0 -0
  10. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/.vscode/settings.json +0 -0
  11. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/LICENSE +0 -0
  12. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/README.md +0 -0
  13. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/README_cn.md +0 -0
  14. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/__init__.py +0 -0
  15. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/factory.py +0 -0
  16. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/file_system.py +0 -0
  17. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  18. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  19. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  20. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  21. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  22. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
  23. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  24. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  25. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/plan.py +0 -0
  26. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/agents/wrap.py +0 -0
  27. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  28. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  29. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/chat_models/base.py +0 -0
  30. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  31. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/embeddings/base.py +0 -0
  32. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  33. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/content.py +0 -0
  34. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/message_convert/format.py +0 -0
  35. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  36. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  37. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  38. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/pipeline/types.py +0 -0
  39. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/py.typed +0 -0
  40. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  41. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  42. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  43. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/__init__.py +0 -0
  44. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_agent.py +0 -0
  45. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_chat_models.py +0 -0
  46. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_human_in_the_loop.py +0 -0
  47. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_load_embbeding.py +0 -0
  48. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_load_model.py +0 -0
  49. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_messages.py +0 -0
  50. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_model_tool_emulator.py +0 -0
  51. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_pipline.py +0 -0
  52. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_plan_middleware.py +0 -0
  53. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_router_model.py +0 -0
  54. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_tool_call_repair.py +0 -0
  55. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_tool_calling.py +0 -0
  56. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/tests/test_wrap_agent.py +0 -0
  57. {langchain_dev_utils-1.2.7 → langchain_dev_utils-1.2.8}/uv.lock +0 -0
--- langchain_dev_utils-1.2.7/PKG-INFO
+++ langchain_dev_utils-1.2.8/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.7
+Version: 1.2.8
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
--- langchain_dev_utils-1.2.7/pyproject.toml
+++ langchain_dev_utils-1.2.8/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "langchain-dev-utils"
-version = "1.2.7"
+version = "1.2.8"
 description = "A practical utility library for LangChain and LangGraph development"
 readme = "README.md"
 authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
--- /dev/null
+++ langchain_dev_utils-1.2.8/src/langchain_dev_utils/__init__.py
@@ -0,0 +1 @@
+__version__ = "1.2.8"
--- langchain_dev_utils-1.2.7/src/langchain_dev_utils/_utils.py
+++ langchain_dev_utils-1.2.8/src/langchain_dev_utils/_utils.py
@@ -8,9 +8,10 @@ def _check_pkg_install(
     pkg: Literal["langchain_openai", "json_repair"],
 ) -> None:
     if not util.find_spec(pkg):
-        msg = (
-            "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
-        )
+        if pkg == "langchain_openai":
+            msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
+        else:
+            msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
         raise ImportError(msg)
 
 
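The error message now depends on which optional dependency is missing: langchain_openai maps to the 'openai-compatible' hint, json_repair to the ToolCallRepairMiddleware hint. A minimal runnable sketch of the helper as it reads after this hunk, with the surrounding imports inferred from the visible calls (an assumption, not quoted from the package):

from importlib import util
from typing import Literal


def _check_pkg_install(
    pkg: Literal["langchain_openai", "json_repair"],
) -> None:
    # find_spec returns None when the module cannot be imported
    if not util.find_spec(pkg):
        if pkg == "langchain_openai":
            msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
        else:
            msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
        raise ImportError(msg)

So _check_pkg_install("json_repair") in an environment without json_repair now raises the middleware-specific message instead of the generic 'openai-compatible' one.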
--- langchain_dev_utils-1.2.7/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py
+++ langchain_dev_utils-1.2.8/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py
@@ -20,7 +20,7 @@ from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LangSmithParams, LanguageModelInput
-from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
 from langchain_core.outputs import ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
@@ -39,47 +39,63 @@ from pydantic import (
 )
 from typing_extensions import Self
 
-from ..types import CompatibilityOptions, ToolChoiceType
+from ..types import (
+    CompatibilityOptions,
+    ReasoningContentKeepType,
+    ResponseFormatType,
+    ToolChoiceType,
+)
 
 _BM = TypeVar("_BM", bound=BaseModel)
 _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
 _DictOrPydantic = Union[dict, _BM]
 
 
+def _get_last_human_message_index(messages: list[BaseMessage]) -> int:
+    """find the index of the last HumanMessage in the messages list, return -1 if not found."""
+    return next(
+        (
+            i
+            for i in range(len(messages) - 1, -1, -1)
+            if isinstance(messages[i], HumanMessage)
+        ),
+        -1,
+    )
+
+
 class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """
     Base template class for OpenAI-compatible chat model implementations.
 
-    This class provides a foundation for integrating various LLM providers that
-    offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
+    This class provides a foundation for integrating various LLM providers that
+    offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
     and many others). It enhances the base OpenAI functionality by:
 
     **1. Supports output of more types of reasoning content (reasoning_content)**
-    ChatOpenAI can only output reasoning content natively supported by official
-    OpenAI models, while OpenAICompatibleChatModel can output reasoning content
-    from other model providers (e.g., OpenRouter).
+    ChatOpenAI can only output reasoning content natively supported by official
+    OpenAI models, while OpenAICompatibleChatModel can output reasoning content
+    from other model providers (e.g., OpenRouter, vLLM).
 
-    **2. Optimizes default behavior for structured output**
-    When calling with_structured_output, the default value of the method
-    parameter is adjusted to "function_calling" (instead of the default
-    "json_schema" in ChatOpenAI), providing better compatibility with other
-    models.
+    **2. Dynamically adapts to choose the most suitable structured-output method**
+    OpenAICompatibleChatModel adds method="auto" (default), which selects the best
+    structured-output method (function_calling or json_schema) based on the actual
+    capabilities of the model provider.
 
     **3. Supports configuration of related parameters**
-    For cases where parameters differ from the official OpenAI API, this library
-    provides the compatibility_options parameter to address this issue. For
-    example, when different model providers have inconsistent support for
-    tool_choice, you can adapt by setting supported_tool_choice in
+    For cases where parameters differ from the official OpenAI API, this library
+    provides the compatibility_options parameter to address this issue. For
+    example, when different model providers have inconsistent support for
+    tool_choice, you can adapt by setting supported_tool_choice in
     compatibility_options.
 
-    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
-    extends capabilities to better support diverse OpenAI-compatible model
-    providers while maintaining full compatibility with LangChain's chat model
+    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
+    extends capabilities to better support diverse OpenAI-compatible model
+    providers while maintaining full compatibility with LangChain's chat model
     interface.
 
-    Note: This is a template class and should not be exported or instantiated
-    directly. Instead, use it as a base class and provide the specific provider
-    name through inheritance or the factory function
+    Note: This is a template class and should not be exported or instantiated
+    directly. Instead, use it as a base class and provide the specific provider
+    name through inheritance or the factory function
     `_create_openai_compatible_model()`.
     """
 
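The new _get_last_human_message_index helper scans the history backwards and is used further down to implement the "temp" reasoning mode. A quick behavioral check; the function body is quoted verbatim from the hunk above, the sample messages are illustrative:

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def _get_last_human_message_index(messages: list[BaseMessage]) -> int:
    """find the index of the last HumanMessage in the messages list, return -1 if not found."""
    return next(
        (
            i
            for i in range(len(messages) - 1, -1, -1)
            if isinstance(messages[i], HumanMessage)
        ),
        -1,
    )


msgs: list[BaseMessage] = [
    HumanMessage("plan a trip"),       # index 0
    AIMessage("draft itinerary"),      # index 1
    HumanMessage("make it two days"),  # index 2, the last human turn
    AIMessage("shorter itinerary"),    # index 3
]
assert _get_last_human_message_index(msgs) == 2
assert _get_last_human_message_index([AIMessage("no user turn")]) == -1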
@@ -101,29 +117,13 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """Provider Compatibility Options"""
     supported_tool_choice: ToolChoiceType = Field(default_factory=list)
     """Supported tool choice"""
-    keep_reasoning_content: bool = Field(default=False)
-    """Whether to keep reasoning content in the messages"""
-    support_json_mode: bool = Field(default=False)
-    """Whether to support JSON mode"""
+    supported_response_format: ResponseFormatType = Field(default_factory=list)
+    """Supported response format"""
+    reasoning_content_keep_type: ReasoningContentKeepType = Field(default="discard")
+    """How to keep reasoning content in the messages"""
     include_usage: bool = Field(default=True)
     """Whether to include usage information in the output"""
 
-    @property
-    def _supported_tool_choice(self) -> ToolChoiceType:
-        return self.supported_tool_choice
-
-    @property
-    def _keep_reasoning_content(self) -> bool:
-        return self.keep_reasoning_content
-
-    @property
-    def _support_json_mode(self) -> bool:
-        return self.support_json_mode
-
-    @property
-    def _include_usage(self) -> bool:
-        return self.include_usage
-
     @property
     def _llm_type(self) -> str:
         return f"chat-{self._provider}"
@@ -158,14 +158,26 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
             kwargs["stop"] = stop
 
         payload_messages = []
+        last_human_index = -1
+        if self.reasoning_content_keep_type == "temp":
+            last_human_index = _get_last_human_message_index(messages)
 
-        for m in messages:
+        for index, m in enumerate(messages):
             if isinstance(m, AIMessage):
                 msg_dict = _convert_message_to_dict(
                     _convert_from_v1_to_chat_completions(m)
                 )
-                if self._keep_reasoning_content and m.additional_kwargs.get(
-                    "reasoning_content"
+                if (
+                    self.reasoning_content_keep_type == "retain"
+                    and m.additional_kwargs.get("reasoning_content")
+                ):
+                    msg_dict["reasoning_content"] = m.additional_kwargs.get(
+                        "reasoning_content"
+                    )
+                elif (
+                    self.reasoning_content_keep_type == "temp"
+                    and index > last_human_index
+                    and m.additional_kwargs.get("reasoning_content")
                 ):
                     msg_dict["reasoning_content"] = m.additional_kwargs.get(
                         "reasoning_content"
@@ -316,7 +328,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
             kwargs["stream_options"] = {"include_usage": True}
         try:
             for chunk in super()._stream(
@@ -338,7 +350,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
             kwargs["stream_options"] = {"include_usage": True}
         try:
             async for chunk in super()._astream(
@@ -422,11 +434,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         if isinstance(tool_choice, str):
             if (
                 tool_choice in ["auto", "none", "required"]
-                and tool_choice in self._supported_tool_choice
+                and tool_choice in self.supported_tool_choice
             ):
                 support_tool_choice = True
 
-            elif "specific" in self._supported_tool_choice:
+            elif "specific" in self.supported_tool_choice:
                 if tool_choice in tool_names:
                     support_tool_choice = True
                     tool_choice = {
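The hunk ends just before the dict literal, so the exact conversion is not shown here. A hedged sketch of the gating logic, assuming the elided lines build the standard OpenAI chat-completions shape for naming one tool; resolve_tool_choice is a hypothetical restatement, not package code:

from typing import Union

ToolChoice = Union[str, dict]


def resolve_tool_choice(
    tool_choice: str, supported: list[str], tool_names: list[str]
) -> tuple[bool, ToolChoice]:
    # plain keywords pass through only when the provider declares them
    if tool_choice in ["auto", "none", "required"]:
        return tool_choice in supported, tool_choice
    # naming a bound tool requires "specific" support
    if "specific" in supported and tool_choice in tool_names:
        # assumed OpenAI-style shape for forcing one named tool
        return True, {"type": "function", "function": {"name": tool_choice}}
    return False, tool_choice


ok, resolved = resolve_tool_choice("search", ["auto", "specific"], ["search"])
assert ok and resolved["function"]["name"] == "search"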
@@ -443,10 +455,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         schema: Optional[_DictOrPydanticClass] = None,
         *,
         method: Literal[
+            "auto",
             "function_calling",
             "json_mode",
             "json_schema",
-        ] = "function_calling",
+        ] = "auto",
         include_raw: bool = False,
         strict: Optional[bool] = None,
         **kwargs: Any,
@@ -460,7 +473,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
         Args:
             schema: Output schema (Pydantic model class or dictionary definition)
-            method: Extraction method - defaults to function_calling for compatibility
+            method: Extraction method - defaults to auto,it will choice best method based on provider supported response format
             include_raw: Whether to include raw model response alongside parsed output
             strict: Schema enforcement strictness (provider-dependent)
             **kwargs: Additional structured output parameters
@@ -468,10 +481,23 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         Returns:
             Runnable configured for structured output extraction
         """
-        # Many providers do not support json_schema method, so fallback to function_calling
-        if method == "json_schema":
+        if method not in ["auto", "function_calling", "json_mode", "json_schema"]:
+            raise ValueError(
+                f"Unsupported method: {method}. Please choose from 'auto', 'function_calling', 'json_mode', 'json_schema'."
+            )
+        if method == "auto":
+            if "json_schema" in self.supported_response_format:
+                method = "json_schema"
+            else:
+                method = "function_calling"
+        elif (
+            method == "json_schema"
+            and "json_schema" not in self.supported_response_format
+        ):
             method = "function_calling"
-        if method == "json_mode" and not self._support_json_mode:
+        elif (
+            method == "json_mode" and "json_mode" not in self.supported_response_format
+        ):
             method = "function_calling"
 
         return super().with_structured_output(
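Resolution order for the new default: "auto" prefers json_schema when the provider declares it, otherwise falls back to function_calling; explicit json_schema or json_mode requests silently fall back the same way when unsupported. A standalone restatement of that logic (resolve_method is a hypothetical name, not part of the package):

def resolve_method(method: str, supported_response_format: list[str]) -> str:
    if method not in ["auto", "function_calling", "json_mode", "json_schema"]:
        raise ValueError(f"Unsupported method: {method}")
    if method == "auto":
        # prefer the richer native format when the provider supports it
        return (
            "json_schema"
            if "json_schema" in supported_response_format
            else "function_calling"
        )
    if method == "json_schema" and "json_schema" not in supported_response_format:
        return "function_calling"
    if method == "json_mode" and "json_mode" not in supported_response_format:
        return "function_calling"
    return method


assert resolve_method("auto", ["json_schema", "json_mode"]) == "json_schema"
assert resolve_method("auto", []) == "function_calling"
assert resolve_method("json_mode", []) == "function_calling"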
@@ -502,8 +528,8 @@ def _create_openai_compatible_model(
         Configured model class ready for instantiation with provider-specific settings
     """
     chat_model_cls_name = f"Chat{provider.title()}"
-
-    compatibility_options = compatibility_options or {}
+    if compatibility_options is None:
+        compatibility_options = {}
 
     return create_model(
         chat_model_cls_name,
@@ -532,13 +558,17 @@ def _create_openai_compatible_model(
             ToolChoiceType,
             Field(default=compatibility_options.get("supported_tool_choice", ["auto"])),
         ),
-        keep_reasoning_content=(
-            bool,
-            Field(default=compatibility_options.get("keep_reasoning_content", False)),
+        reasoning_content_keep_type=(
+            ReasoningContentKeepType,
+            Field(
+                default=compatibility_options.get(
+                    "reasoning_content_keep_type", "discard"
+                )
+            ),
         ),
-        support_json_mode=(
-            bool,
-            Field(default=compatibility_options.get("support_json_mode", False)),
+        supported_response_format=(
+            ResponseFormatType,
+            Field(default=compatibility_options.get("supported_response_format", [])),
         ),
         include_usage=(
             bool,
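Each compatibility option becomes a class-level field default on the generated chat-model class via pydantic's create_model. A toy sketch of that pattern with simplified types; DemoModel and opts are illustrative, not package code:

from pydantic import BaseModel, Field, create_model

opts = {"supported_response_format": ["json_schema"]}

# each field definition is a (type, Field(default=...)) tuple, with the
# default pulled from the options dict exactly as in the hunk above
DemoModel = create_model(
    "DemoModel",
    __base__=BaseModel,
    supported_response_format=(
        list[str],
        Field(default=opts.get("supported_response_format", [])),
    ),
    reasoning_content_keep_type=(
        str,
        Field(default=opts.get("reasoning_content_keep_type", "discard")),
    ),
)

m = DemoModel()
assert m.supported_response_format == ["json_schema"]
assert m.reasoning_content_keep_type == "discard"  # falls back to the default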
--- langchain_dev_utils-1.2.7/src/langchain_dev_utils/chat_models/types.py
+++ langchain_dev_utils-1.2.8/src/langchain_dev_utils/chat_models/types.py
@@ -7,9 +7,13 @@ ChatModelType = Union[type[BaseChatModel], Literal["openai-compatible"]]
 
 ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]
 
+ResponseFormatType = list[Literal["json_schema", "json_mode"]]
+
+ReasoningContentKeepType = Literal["discard", "temp", "retain"]
+
 
 class CompatibilityOptions(TypedDict):
     supported_tool_choice: NotRequired[ToolChoiceType]
-    keep_reasoning_content: NotRequired[bool]
-    support_json_mode: NotRequired[bool]
+    supported_response_format: NotRequired[ResponseFormatType]
+    reasoning_content_keep_type: NotRequired[ReasoningContentKeepType]
     include_usage: NotRequired[bool]
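For callers migrating from 1.2.7, the removed booleans map onto the new fields: keep_reasoning_content=True roughly corresponds to reasoning_content_keep_type="retain", and support_json_mode=True to supported_response_format=["json_mode"]. A minimal sketch of the new options shape, assuming the names import from the module path shown in the file list (whether they are re-exported elsewhere is not visible in this diff):

from langchain_dev_utils.chat_models.types import CompatibilityOptions

options: CompatibilityOptions = {
    "supported_tool_choice": ["auto", "required", "specific"],
    # replaces support_json_mode=True; list every format the provider accepts
    "supported_response_format": ["json_schema", "json_mode"],
    # replaces keep_reasoning_content=True; "retain" sends reasoning for all
    # AI turns, "temp" only after the last human turn, "discard" drops it
    "reasoning_content_keep_type": "retain",
    "include_usage": True,
}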
--- langchain_dev_utils-1.2.7/src/langchain_dev_utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "1.2.7"